Dataset columns:

    edited_code      string, lengths 17 to 978k
    original_code    string, lengths 17 to 978k

Each row below is one (edited_code, original_code) pair of Python source files.
--- row 1: edited_code ---

from discord.ext import commands
import discord, random


class Events(commands.Cog):
    def __init__(self, bot):
        self.bot: commands.Bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        print("Bot is Ready")
        print(f"Logged in as {self.bot.user}")
        print(f"Id: {self.bot.user.id}")

    @commands.Cog.listener()
    async def on_guild_join(self, guild):
        channels = [channel for channel in guild.channels]
        roles = [role for role in guild.roles]
        embed = discord.Embed(title=f"Bot just joined {guild.name}", color=random.randint(0, 16777215))
        embed.set_thumbnail(url=guild.icon.url if guild.icon else "https://i.imgur.com/3ZUrjUP.png")
        embed.add_field(name="Server Name:", value=f"{guild.name}")
        embed.add_field(name="Server ID:", value=f"{guild.id}")
        embed.add_field(name="Server region:", value=f"{guild.region}")
        embed.add_field(
            name="Server Creation Date:",
            value=f"{discord.utils.format_dt(guild.created_at, style = "d")}\n{discord.utils.format_dt(guild.created_at, style = "T")}",
        )
        embed.add_field(name="Server Owner:", value=f"{guild.owner}")
        embed.add_field(name="Server Owner ID:", value=f"{guild.owner_id}")
        embed.add_field(name="Member Count:", value=f"{guild.member_count}")
        embed.add_field(name="Amount of Channels:", value=f"{len(channels)}")
        embed.add_field(name="Amount of Roles:", value=f"{len(roles)}")
        await self.bot.try_channel(947882907068956682).send(embed=embed)

    @commands.Cog.listener()
    async def on_guild_remove(self, guild):
        channels = [channel for channel in guild.channels]
        roles = [role for role in guild.roles]
        embed = discord.Embed(title=f"Bot just left : {guild.name}", color=random.randint(0, 16777215))
        embed.set_thumbnail(url=guild.icon.url if guild.icon else "https://i.imgur.com/3ZUrjUP.png")
        embed.add_field(name="Server Name:", value=f"{guild.name}")
        embed.add_field(name="Server ID:", value=f"{guild.id}")
        try:
            embed.add_field(name="Server region:", value=f"{guild.region}")
        except:
            pass
        embed.add_field(
            name="Server Creation Date:",
            value=f"{discord.utils.format_dt(guild.created_at, style = "d")}\n{discord.utils.format_dt(guild.created_at, style = "T")}",
        )
        embed.add_field(name="Server Owner:", value=f"{guild.owner}")
        embed.add_field(name="Server Owner ID:", value=f"{guild.owner_id}")
        try:
            embed.add_field(name="Member Count:", value=f"{guild.member_count}")
        except:
            pass
        embed.add_field(name="Amount of Channels:", value=f"{len(channels)}")
        embed.add_field(name="Amount of Roles:", value=f"{len(roles)}")
        await self.bot.try_channel(947882907068956682).send(embed=embed)


async def setup(bot):
    await bot.add_cog(Events(bot))
--- row 1: original_code ---

from discord.ext import commands
import discord, random


class Events(commands.Cog):
    def __init__(self, bot):
        self.bot: commands.Bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        print("Bot is Ready")
        print(f"Logged in as {self.bot.user}")
        print(f"Id: {self.bot.user.id}")

    @commands.Cog.listener()
    async def on_guild_join(self, guild):
        channels = [channel for channel in guild.channels]
        roles = [role for role in guild.roles]
        embed = discord.Embed(title=f"Bot just joined {guild.name}", color=random.randint(0, 16777215))
        embed.set_thumbnail(url=guild.icon.url if guild.icon else "https://i.imgur.com/3ZUrjUP.png")
        embed.add_field(name="Server Name:", value=f"{guild.name}")
        embed.add_field(name="Server ID:", value=f"{guild.id}")
        embed.add_field(name="Server region:", value=f"{guild.region}")
        embed.add_field(
            name="Server Creation Date:",
            value=f"{discord.utils.format_dt(guild.created_at, style = 'd')}\n{discord.utils.format_dt(guild.created_at, style = 'T')}",
        )
        embed.add_field(name="Server Owner:", value=f"{guild.owner}")
        embed.add_field(name="Server Owner ID:", value=f"{guild.owner_id}")
        embed.add_field(name="Member Count:", value=f"{guild.member_count}")
        embed.add_field(name="Amount of Channels:", value=f"{len(channels)}")
        embed.add_field(name="Amount of Roles:", value=f"{len(roles)}")
        await self.bot.try_channel(947882907068956682).send(embed=embed)

    @commands.Cog.listener()
    async def on_guild_remove(self, guild):
        channels = [channel for channel in guild.channels]
        roles = [role for role in guild.roles]
        embed = discord.Embed(title=f"Bot just left : {guild.name}", color=random.randint(0, 16777215))
        embed.set_thumbnail(url=guild.icon.url if guild.icon else "https://i.imgur.com/3ZUrjUP.png")
        embed.add_field(name="Server Name:", value=f"{guild.name}")
        embed.add_field(name="Server ID:", value=f"{guild.id}")
        try:
            embed.add_field(name="Server region:", value=f"{guild.region}")
        except:
            pass
        embed.add_field(
            name="Server Creation Date:",
            value=f"{discord.utils.format_dt(guild.created_at, style = 'd')}\n{discord.utils.format_dt(guild.created_at, style = 'T')}",
        )
        embed.add_field(name="Server Owner:", value=f"{guild.owner}")
        embed.add_field(name="Server Owner ID:", value=f"{guild.owner_id}")
        try:
            embed.add_field(name="Member Count:", value=f"{guild.member_count}")
        except:
            pass
        embed.add_field(name="Amount of Channels:", value=f"{len(channels)}")
        embed.add_field(name="Amount of Roles:", value=f"{len(roles)}")
        await self.bot.try_channel(947882907068956682).send(embed=embed)


async def setup(bot):
    await bot.add_cog(Events(bot))
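Every pair in this dump differs only in how inner string literals are quoted inside f-string replacement fields: the edited_code column reuses the f-string's own quote character, while original_code switches to the other quote type. A minimal sketch of that contrast (my reading of the diffs, not stated anywhere in the dump; the datetime example is illustrative, and the file parses only on Python 3.12+):

    from datetime import datetime, timezone

    now = datetime.now(timezone.utc)

    # original_code style: before Python 3.12, an f-string could not reuse its
    # own delimiter inside a replacement field, so inner literals must use the
    # other quote type.
    print(f"day {now.strftime('%d-%m-%Y')}")

    # edited_code style: PEP 701 (Python 3.12) formalized f-string parsing and
    # allows the inner literal to reuse the enclosing double quote.
    print(f"day {now.strftime("%d-%m-%Y")}")

On Python 3.11 and earlier the second print is a SyntaxError, which is presumably why the original_code variants avoid quote reuse.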
--- row 2: edited_code ---

import json
import os
from pathlib import Path
import subprocess as sp
import sys
from textwrap import dedent
import venv

assert __name__ == "__main__"

name_kernel = "binary-embedding"
path_venv = Path(".venv").resolve()

if "-h" in sys.argv or "--help" in sys.argv:
    print(
        dedent(f"""\
        Prepare an isolated environment for running the binary similarity notebook.

        Usage: {sys.executable} {__file__} [ARG]

        If the Python virtual environment is active, ARG designates the name to give
        to the Jupyter kernel in which to run the notebook; by default, we use the
        name `{name_kernel}'.

        If the Python virtual environment is not yet active, ARG designates the path
        in which to set up the virtual environment. By default, we use

        {path_venv}

        Once the environment has been created, you should activate it (according to
        instructions in https://docs.python.org/3/library/venv.html) and run this
        script again to prepare the Jupyter kernel.
        """.rstrip()),
        file=sys.stderr
    )
    sys.exit(0)

if "VIRTUAL_ENV" in os.environ:
    # Environment is active.
    if len(sys.argv) >= 2:
        name_kernel = sys.argv[1]

    jupyter = []
    ipykernel = []
    r = sp.run(
        ["jupyter", "kernelspec", "list", "--json"],
        encoding="utf-8",
        stdout=sp.PIPE
    )
    if r.returncode == 0:
        specs = set(json.loads(r.stdout).get("kernelspecs", {}).keys())
        if name_kernel not in specs:
            ipykernel = ["ipykernel"]
    else:
        jupyter = ["jupyterlab", "ipykernel"]

    try:
        if jupyter or ipykernel:
            print(f"Must install: {" ".join([*jupyter, *ipykernel])}")
            sp.run(["pip", "install", *jupyter, *ipykernel], check=True)
            sp.run(
                ["python", "-m", "ipykernel", "install", "--user", "--name", name_kernel],
                check=True
            )
            print()

        r = sp.run(
            ["pip", "list", "--format", "json"],
            check=True,
            encoding="utf-8",
            stdout=sp.PIPE
        )
        dependencies = set([p.get("name", "") for p in json.loads(r.stdout)])
        if "jupyterlab" in dependencies:
            print("Ready to go! Run `jupyter lab' to get started.")
        else:
            print("Kernel deployed! Load the notebook in your running Jupyter and set")
            print(f"the kernel to {name_kernel} to get going.")
    except sp.CalledProcessError as err:
        sys.exit(err.returncode)
else:
    # Environment is not active.
    if len(sys.argv) >= 2:
        path_venv = Path(sys.argv[1])
        if str(path_venv).startswith("-"):
            print(f"Invalid environment path: {path_venv}")
            sys.exit(28)

    if not path_venv.is_dir():
        print("Creating virtual environment")
        venv.create(path_venv, with_pip=True, upgrade_deps=True)

    print()
    print("*** Environment ready! Activate it, then run this script once again to finalize setup. ***")
--- row 2: original_code ---

import json
import os
from pathlib import Path
import subprocess as sp
import sys
from textwrap import dedent
import venv

assert __name__ == "__main__"

name_kernel = "binary-embedding"
path_venv = Path(".venv").resolve()

if "-h" in sys.argv or "--help" in sys.argv:
    print(
        dedent(f"""\
        Prepare an isolated environment for running the binary similarity notebook.

        Usage: {sys.executable} {__file__} [ARG]

        If the Python virtual environment is active, ARG designates the name to give
        to the Jupyter kernel in which to run the notebook; by default, we use the
        name `{name_kernel}'.

        If the Python virtual environment is not yet active, ARG designates the path
        in which to set up the virtual environment. By default, we use

        {path_venv}

        Once the environment has been created, you should activate it (according to
        instructions in https://docs.python.org/3/library/venv.html) and run this
        script again to prepare the Jupyter kernel.
        """.rstrip()),
        file=sys.stderr
    )
    sys.exit(0)

if "VIRTUAL_ENV" in os.environ:
    # Environment is active.
    if len(sys.argv) >= 2:
        name_kernel = sys.argv[1]

    jupyter = []
    ipykernel = []
    r = sp.run(
        ["jupyter", "kernelspec", "list", "--json"],
        encoding="utf-8",
        stdout=sp.PIPE
    )
    if r.returncode == 0:
        specs = set(json.loads(r.stdout).get("kernelspecs", {}).keys())
        if name_kernel not in specs:
            ipykernel = ["ipykernel"]
    else:
        jupyter = ["jupyterlab", "ipykernel"]

    try:
        if jupyter or ipykernel:
            print(f"Must install: {' '.join([*jupyter, *ipykernel])}")
            sp.run(["pip", "install", *jupyter, *ipykernel], check=True)
            sp.run(
                ["python", "-m", "ipykernel", "install", "--user", "--name", name_kernel],
                check=True
            )
            print()

        r = sp.run(
            ["pip", "list", "--format", "json"],
            check=True,
            encoding="utf-8",
            stdout=sp.PIPE
        )
        dependencies = set([p.get("name", "") for p in json.loads(r.stdout)])
        if "jupyterlab" in dependencies:
            print("Ready to go! Run `jupyter lab' to get started.")
        else:
            print("Kernel deployed! Load the notebook in your running Jupyter and set")
            print(f"the kernel to {name_kernel} to get going.")
    except sp.CalledProcessError as err:
        sys.exit(err.returncode)
else:
    # Environment is not active.
    if len(sys.argv) >= 2:
        path_venv = Path(sys.argv[1])
        if str(path_venv).startswith("-"):
            print(f"Invalid environment path: {path_venv}")
            sys.exit(28)

    if not path_venv.is_dir():
        print("Creating virtual environment")
        venv.create(path_venv, with_pip=True, upgrade_deps=True)

    print()
    print("*** Environment ready! Activate it, then run this script once again to finalize setup. ***")
--- row 3: edited_code ---

#-------------------------------------------------------------------------------------------------------------------------------
# COVID19 PREDICTION IN INDIA
# FILE NAME: test.py
# DEVELOPED BY: Vigneshwar Ravichandar
# TOPICS: Regression, Machine Learning, TensorFlow
#-------------------------------------------------------------------------------------------------------------------------------

# IMPORTING REQUIRED MODULES
import numpy as np
import pandas as pd
from datetime import datetime
import tensorflow as tf
from tensorflow.keras.models import load_model
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

print(f"TensorFlow version: {tf.__version__}")

DATASET_PATH = "data/covid_data.csv"

# IMPORTING THE DATASET
data = pd.read_csv(DATASET_PATH)

# SEGMENTING THE DATA
x = data.iloc[:,1].values
y1 = data.iloc[:,2].values
y2 = data.iloc[:,3].values

print("Dataset Description:\n",data.describe())
print("Dataset Head\n",data.head())

# RESHAPING THE DATA
x = np.reshape(x, (-1,1))
y1 = np.reshape(y1, (-1,1))
y2 = np.reshape(y2, (-1,1))

# SCALING THE DATA
x_sc = StandardScaler()
y1_sc = StandardScaler()
y2_sc = StandardScaler()
x_sc.fit(x)
y1_sc.fit(y1)
y2_sc.fit(y2)

# DEFINING THE TRAINED MODEL
model_c = tf.keras.models.load_model("model/model_cases", custom_objects=None, compile=True)
model_d = tf.keras.models.load_model("model/model_deaths", custom_objects=None, compile=True)

# PREDICTING VALUES USING TRAINED MODEL
fn_date = str(input("Enter the date to be predicted in the format DD-MM-YYYY: "))
date1 = datetime(2020,3,2)
date2 = datetime(int(fn_date[6:10]),int(fn_date[3:5]),int(fn_date[0:2]))
diff = (date2-date1).days
diff = np.array(diff)
diff = np.reshape(diff, (-1,1))
diff_sc = x_sc.transform(diff)
res1_sc = model_c.predict(diff_sc)
res2_sc = model_d.predict(diff_sc)
res1 = y1_sc.inverse_transform(res1_sc)
res2 = y2_sc.inverse_transform(res2_sc)
print(f"The estimated number of cases in day {date2.strftime("%d-%m-%Y")} is {"{:,}".format(int(res1))}")
print(f"The estimated number of deaths in day {date2.strftime("%d-%m-%Y")} is {"{:,}".format(int(res2))}")
--- row 3: original_code ---

#-------------------------------------------------------------------------------------------------------------------------------
# COVID19 PREDICTION IN INDIA
# FILE NAME: test.py
# DEVELOPED BY: Vigneshwar Ravichandar
# TOPICS: Regression, Machine Learning, TensorFlow
#-------------------------------------------------------------------------------------------------------------------------------

# IMPORTING REQUIRED MODULES
import numpy as np
import pandas as pd
from datetime import datetime
import tensorflow as tf
from tensorflow.keras.models import load_model
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

print(f"TensorFlow version: {tf.__version__}")

DATASET_PATH = "data/covid_data.csv"

# IMPORTING THE DATASET
data = pd.read_csv(DATASET_PATH)

# SEGMENTING THE DATA
x = data.iloc[:,1].values
y1 = data.iloc[:,2].values
y2 = data.iloc[:,3].values

print("Dataset Description:\n",data.describe())
print("Dataset Head\n",data.head())

# RESHAPING THE DATA
x = np.reshape(x, (-1,1))
y1 = np.reshape(y1, (-1,1))
y2 = np.reshape(y2, (-1,1))

# SCALING THE DATA
x_sc = StandardScaler()
y1_sc = StandardScaler()
y2_sc = StandardScaler()
x_sc.fit(x)
y1_sc.fit(y1)
y2_sc.fit(y2)

# DEFINING THE TRAINED MODEL
model_c = tf.keras.models.load_model("model/model_cases", custom_objects=None, compile=True)
model_d = tf.keras.models.load_model("model/model_deaths", custom_objects=None, compile=True)

# PREDICTING VALUES USING TRAINED MODEL
fn_date = str(input("Enter the date to be predicted in the format DD-MM-YYYY: "))
date1 = datetime(2020,3,2)
date2 = datetime(int(fn_date[6:10]),int(fn_date[3:5]),int(fn_date[0:2]))
diff = (date2-date1).days
diff = np.array(diff)
diff = np.reshape(diff, (-1,1))
diff_sc = x_sc.transform(diff)
res1_sc = model_c.predict(diff_sc)
res2_sc = model_d.predict(diff_sc)
res1 = y1_sc.inverse_transform(res1_sc)
res2 = y2_sc.inverse_transform(res2_sc)
print(f"The estimated number of cases in day {date2.strftime('%d-%m-%Y')} is {'{:,}'.format(int(res1))}")
print(f"The estimated number of deaths in day {date2.strftime('%d-%m-%Y')} is {'{:,}'.format(int(res2))}")
--- row 4: edited_code ---

from datetime import datetime

import flask_login as login
from flask import Blueprint, Flask, Markup, redirect, request, url_for
from flask_admin import Admin, AdminIndexView, expose, helpers
from flask_admin.contrib.sqla import ModelView
from werkzeug.security import generate_password_hash
from wtforms import fields, form

from redata import settings
from redata.checks.data_schema import check_for_new_tables
from redata.conf import Conf
from redata.db_operations import metrics_session
from redata.grafana.grafana_setup import create_dashboards
from redata.models import Alert, Check, DataSource, Scan, Table, User
from redata.ui_admin.forms import LoginForm

redata_blueprint = Blueprint("route_blueprint", __name__)


# Initialize flask-login
def init_login(app):
    login_manager = login.LoginManager()
    login_manager.init_app(app)

    @app.teardown_request
    def teardown_request(*args, **kwargs):
        "Expire and remove the session after each request"
        metrics_session.expire_all()

    # Create user loader function
    @login_manager.user_loader
    def load_user(user_id):
        return metrics_session.query(User).get(user_id)


def init_admin(app):
    init_login(app)
    admin = Admin(
        app,
        name="Redata",
        index_view=RedataAdminView(),
        template_mode="bootstrap3",
        base_template="redata_master.html",
    )
    admin.add_view(AlertView(Alert, metrics_session))
    admin.add_view(TableView(Table, metrics_session))
    admin.add_view(ChecksTableView(Check, metrics_session))
    admin.add_view(DataSourceView(DataSource, metrics_session))
    admin.add_view(ScanView(Scan, metrics_session))


def create_app():
    app = Flask(__name__)

    # set optional bootswatch theme
    app.config["FLASK_ADMIN_SWATCH"] = "cerulean"
    app.config["SQLALCHEMY_DATABASE_URI"] = settings.METRICS_DB_URL
    app.config["SECRET_KEY"] = settings.FLASK_UI_SECRET_KEY
    app.config["SESSION_COOKIE_NAME"] = "redata_session_cookie"
    app.route(admin_redirect, endpoint="/")
    app.register_blueprint(redata_blueprint)
    init_admin(app)
    return app


def grafana_url_formatter_fun(table):
    if table.grafana_url:
        url = f"<a href='http://{settings.GRAFNA_URL}{table.grafana_url}' target='_blank'>{table.grafana_url}</a>"
        return Markup(url)
    else:
        return "Not yet created"


def table_details_link_formatter(table):
    url_for_details = url_for("table.details_view", id=table.id)
    return Markup(f'<a href="{url_for_details}">{table.full_table_name}</a>')


@redata_blueprint.route("/")
def admin_redirect():
    return redirect("/admin")


class RedataAdminView(AdminIndexView):
    def is_visible(self):
        # This view won't appear in the menu structure
        return False

    @expose("/")
    def index(self):
        if not login.current_user.is_authenticated:
            return redirect(url_for(".login_view"))
        return super(RedataAdminView, self).index()

    @expose("/login/", methods=("GET", "POST"))
    def login_view(self):
        # handle user login
        form = LoginForm(request.form)
        if helpers.validate_form_on_submit(form):
            user = form.get_user()
            login.login_user(user)

        if login.current_user.is_authenticated:
            return redirect(url_for(".index"))
        self._template_args["form"] = form
        return super(RedataAdminView, self).index()

    @expose("/logout/")
    def logout_view(self):
        login.logout_user()
        return redirect(url_for(".index"))


class BaseRedataView(ModelView):
    page_size = 1000

    def _user_formatter_time(self, context, model, name):
        if model.created_at:
            return model.created_at.strftime("%Y-%m-%d %H:%M:%S")
        else:
            return ""

    column_formatters = {"created_at": _user_formatter_time}


class TableView(BaseRedataView):
    can_delete = False
    can_view_details = True
    can_create = False

    def is_accessible(self):
        return login.current_user.is_authenticated

    def grafana_url_formatter(self, context, model, name):
        return grafana_url_formatter_fun(model)

    def schema_formatter(self, context, model, schema):
        max_length = max([len(x["name"]) for x in model.schema["columns"]])
        str_repr = "<br/>".join(
            f"{row["name"].ljust(max_length + 2, "§")} [{row["type"]}]"
            for row in model.schema["columns"]
        )
        str_repr = str_repr.replace("§", "&nbsp;")
        return Markup('<div class="schema-repr">' + str_repr + "</div>")

    def alerts_formatter(self, context, model, schema):
        table_alerts = []
        for alert in model.alerts_by_creation:
            table_alerts.append(f"[{alert.created_at}] {alert.text}")
        str_rep = "<br/>".join(table_alerts)
        return Markup('<div class="alerts-repr">' + str_rep + "</div>")

    def schema_change_formatter(self, context, model, schema):
        table_changes = []
        for el in model.schema_changes:
            event = el.result
            if event["value"]["operation"] == "table detected":
                continue
            table_changes.append(f'[{el.created_at}] {event['value']}')
        str_rep = "<br/>".join(table_changes)
        return Markup('<div class="schema-changes-repr">' + str_rep + "</div>")

    def alerts_number_formatter(self, context, model, schema):
        url_for_details = url_for("table.details_view", id=model.id)
        return Markup(f'<a href="{url_for_details}">{model.alerts_number}</a>')

    def last_record_added_formatter(self, context, model, schema):
        metric = model.last_records_added
        if not metric:
            return None
        minutes = metric.result["value"] / 60
        return Markup(
            f'<div class="last-record">[{metric.created_at}], last_record_added - {minutes:.2f} minutes ago</div>'
        )

    column_searchable_list = ("source_db", "table_name", "namespace")
    column_editable_list = ["active", "time_column"]
    column_exclude_list = ["schema", "created_at"]
    column_list = [
        "source_db",
        "active",
        "table_name",
        "time_column",
        "alerts_number",
        "grafana_url",
    ]
    column_details_list = [
        "source_db",
        "active",
        "table_name",
        "schema",
        "schema_changes",
        "alerts_by_creation",
        "last_records_added",
        "grafana_url",
    ]

    column_formatters = {
        "created_at": BaseRedataView._user_formatter_time,
        "schema": schema_formatter,
        "grafana_url": grafana_url_formatter,
        "alerts_by_creation": alerts_formatter,
        "alerts_number": alerts_number_formatter,
        "last_records_added": last_record_added_formatter,
        "schema_changes": schema_change_formatter,
    }


class AlertView(BaseRedataView):
    can_delete = True
    can_create = False
    can_edit = False
    can_view_details = True

    column_searchable_list = ("text", "alert_type")
    column_list = [
        "created_at",
        "text",
        "alert_type",
        "table",
    ]

    def table_details_formatter(self, context, model, name):
        return table_details_link_formatter(model.table)

    def is_accessible(self):
        return login.current_user.is_authenticated

    column_formatters = {
        "created_at": BaseRedataView._user_formatter_time,
        "table": table_details_formatter,
    }


class DataSourceView(BaseRedataView):
    can_delete = True
    can_create = True

    column_searchable_list = ("name",)
    column_exclude_list = "password"
    form_widget_args = {
        "password": {"type": "password"},
    }
    form_choices = {"source_type": DataSource.SUPPORTED_SOURCES}

    def after_model_change(self, form, model, is_created):
        # Discover tables in the newly added data source
        conf = Conf(datetime.utcnow())
        db = model.get_db_object()
        check_for_new_tables(db, conf)
        create_dashboards()

    def is_accessible(self):
        return login.current_user.is_authenticated


class ChecksTableView(BaseRedataView):
    can_delete = False

    column_searchable_list = ("name", "metrics", "query")

    def table_details_formatter(self, context, model, name):
        return table_details_link_formatter(model.table)

    def is_accessible(self):
        return login.current_user.is_authenticated

    column_formatters = {
        "created_at": BaseRedataView._user_formatter_time,
        "table": table_details_formatter,
    }


class ScanView(BaseRedataView):
    can_delete = False
    form_excluded_columns = ("created_at", "status", "run_type")

    def is_accessible(self):
        return login.current_user.is_authenticated


if __name__ == "__main__":
    app = create_app()
    app.run(host="0.0.0.0", port=5001, debug=True)
--- row 4: original_code ---

from datetime import datetime

import flask_login as login
from flask import Blueprint, Flask, Markup, redirect, request, url_for
from flask_admin import Admin, AdminIndexView, expose, helpers
from flask_admin.contrib.sqla import ModelView
from werkzeug.security import generate_password_hash
from wtforms import fields, form

from redata import settings
from redata.checks.data_schema import check_for_new_tables
from redata.conf import Conf
from redata.db_operations import metrics_session
from redata.grafana.grafana_setup import create_dashboards
from redata.models import Alert, Check, DataSource, Scan, Table, User
from redata.ui_admin.forms import LoginForm

redata_blueprint = Blueprint("route_blueprint", __name__)


# Initialize flask-login
def init_login(app):
    login_manager = login.LoginManager()
    login_manager.init_app(app)

    @app.teardown_request
    def teardown_request(*args, **kwargs):
        "Expire and remove the session after each request"
        metrics_session.expire_all()

    # Create user loader function
    @login_manager.user_loader
    def load_user(user_id):
        return metrics_session.query(User).get(user_id)


def init_admin(app):
    init_login(app)
    admin = Admin(
        app,
        name="Redata",
        index_view=RedataAdminView(),
        template_mode="bootstrap3",
        base_template="redata_master.html",
    )
    admin.add_view(AlertView(Alert, metrics_session))
    admin.add_view(TableView(Table, metrics_session))
    admin.add_view(ChecksTableView(Check, metrics_session))
    admin.add_view(DataSourceView(DataSource, metrics_session))
    admin.add_view(ScanView(Scan, metrics_session))


def create_app():
    app = Flask(__name__)

    # set optional bootswatch theme
    app.config["FLASK_ADMIN_SWATCH"] = "cerulean"
    app.config["SQLALCHEMY_DATABASE_URI"] = settings.METRICS_DB_URL
    app.config["SECRET_KEY"] = settings.FLASK_UI_SECRET_KEY
    app.config["SESSION_COOKIE_NAME"] = "redata_session_cookie"
    app.route(admin_redirect, endpoint="/")
    app.register_blueprint(redata_blueprint)
    init_admin(app)
    return app


def grafana_url_formatter_fun(table):
    if table.grafana_url:
        url = f"<a href='http://{settings.GRAFNA_URL}{table.grafana_url}' target='_blank'>{table.grafana_url}</a>"
        return Markup(url)
    else:
        return "Not yet created"


def table_details_link_formatter(table):
    url_for_details = url_for("table.details_view", id=table.id)
    return Markup(f'<a href="{url_for_details}">{table.full_table_name}</a>')


@redata_blueprint.route("/")
def admin_redirect():
    return redirect("/admin")


class RedataAdminView(AdminIndexView):
    def is_visible(self):
        # This view won't appear in the menu structure
        return False

    @expose("/")
    def index(self):
        if not login.current_user.is_authenticated:
            return redirect(url_for(".login_view"))
        return super(RedataAdminView, self).index()

    @expose("/login/", methods=("GET", "POST"))
    def login_view(self):
        # handle user login
        form = LoginForm(request.form)
        if helpers.validate_form_on_submit(form):
            user = form.get_user()
            login.login_user(user)

        if login.current_user.is_authenticated:
            return redirect(url_for(".index"))
        self._template_args["form"] = form
        return super(RedataAdminView, self).index()

    @expose("/logout/")
    def logout_view(self):
        login.logout_user()
        return redirect(url_for(".index"))


class BaseRedataView(ModelView):
    page_size = 1000

    def _user_formatter_time(self, context, model, name):
        if model.created_at:
            return model.created_at.strftime("%Y-%m-%d %H:%M:%S")
        else:
            return ""

    column_formatters = {"created_at": _user_formatter_time}


class TableView(BaseRedataView):
    can_delete = False
    can_view_details = True
    can_create = False

    def is_accessible(self):
        return login.current_user.is_authenticated

    def grafana_url_formatter(self, context, model, name):
        return grafana_url_formatter_fun(model)

    def schema_formatter(self, context, model, schema):
        max_length = max([len(x["name"]) for x in model.schema["columns"]])
        str_repr = "<br/>".join(
            f"{row['name'].ljust(max_length + 2, '§')} [{row['type']}]"
            for row in model.schema["columns"]
        )
        str_repr = str_repr.replace("§", "&nbsp;")
        return Markup('<div class="schema-repr">' + str_repr + "</div>")

    def alerts_formatter(self, context, model, schema):
        table_alerts = []
        for alert in model.alerts_by_creation:
            table_alerts.append(f"[{alert.created_at}] {alert.text}")
        str_rep = "<br/>".join(table_alerts)
        return Markup('<div class="alerts-repr">' + str_rep + "</div>")

    def schema_change_formatter(self, context, model, schema):
        table_changes = []
        for el in model.schema_changes:
            event = el.result
            if event["value"]["operation"] == "table detected":
                continue
            table_changes.append(f'[{el.created_at}] {event["value"]}')
        str_rep = "<br/>".join(table_changes)
        return Markup('<div class="schema-changes-repr">' + str_rep + "</div>")

    def alerts_number_formatter(self, context, model, schema):
        url_for_details = url_for("table.details_view", id=model.id)
        return Markup(f'<a href="{url_for_details}">{model.alerts_number}</a>')

    def last_record_added_formatter(self, context, model, schema):
        metric = model.last_records_added
        if not metric:
            return None
        minutes = metric.result["value"] / 60
        return Markup(
            f'<div class="last-record">[{metric.created_at}], last_record_added - {minutes:.2f} minutes ago</div>'
        )

    column_searchable_list = ("source_db", "table_name", "namespace")
    column_editable_list = ["active", "time_column"]
    column_exclude_list = ["schema", "created_at"]
    column_list = [
        "source_db",
        "active",
        "table_name",
        "time_column",
        "alerts_number",
        "grafana_url",
    ]
    column_details_list = [
        "source_db",
        "active",
        "table_name",
        "schema",
        "schema_changes",
        "alerts_by_creation",
        "last_records_added",
        "grafana_url",
    ]

    column_formatters = {
        "created_at": BaseRedataView._user_formatter_time,
        "schema": schema_formatter,
        "grafana_url": grafana_url_formatter,
        "alerts_by_creation": alerts_formatter,
        "alerts_number": alerts_number_formatter,
        "last_records_added": last_record_added_formatter,
        "schema_changes": schema_change_formatter,
    }


class AlertView(BaseRedataView):
    can_delete = True
    can_create = False
    can_edit = False
    can_view_details = True

    column_searchable_list = ("text", "alert_type")
    column_list = [
        "created_at",
        "text",
        "alert_type",
        "table",
    ]

    def table_details_formatter(self, context, model, name):
        return table_details_link_formatter(model.table)

    def is_accessible(self):
        return login.current_user.is_authenticated

    column_formatters = {
        "created_at": BaseRedataView._user_formatter_time,
        "table": table_details_formatter,
    }


class DataSourceView(BaseRedataView):
    can_delete = True
    can_create = True

    column_searchable_list = ("name",)
    column_exclude_list = "password"
    form_widget_args = {
        "password": {"type": "password"},
    }
    form_choices = {"source_type": DataSource.SUPPORTED_SOURCES}

    def after_model_change(self, form, model, is_created):
        # Discover tables in the newly added data source
        conf = Conf(datetime.utcnow())
        db = model.get_db_object()
        check_for_new_tables(db, conf)
        create_dashboards()

    def is_accessible(self):
        return login.current_user.is_authenticated


class ChecksTableView(BaseRedataView):
    can_delete = False

    column_searchable_list = ("name", "metrics", "query")

    def table_details_formatter(self, context, model, name):
        return table_details_link_formatter(model.table)

    def is_accessible(self):
        return login.current_user.is_authenticated

    column_formatters = {
        "created_at": BaseRedataView._user_formatter_time,
        "table": table_details_formatter,
    }


class ScanView(BaseRedataView):
    can_delete = False
    form_excluded_columns = ("created_at", "status", "run_type")

    def is_accessible(self):
        return login.current_user.is_authenticated


if __name__ == "__main__":
    app = create_app()
    app.run(host="0.0.0.0", port=5001, debug=True)
""" SiliconLife Eyeflow Class for log batch of extracted images from detection Author: Alex Sobral de Freitas """ import os import json import datetime import pytz import random import cv2 import importlib from pymongo import MongoClient from bson import ObjectId from eyeflow_sdk.file_access import FileAccess from eyeflow_sdk.img_utils import resize_image_scale from eyeflow_sdk.log_obj import log #---------------------------------------------------------------------------------------------------------------------------------- MAX_EXTRACT_FILES = 800 THUMB_SIZE = 128 def clear_log(extract_path, max_files=MAX_EXTRACT_FILES): files_list = os.listdir(extract_path) if len(files_list) > max_files: date_list = [(filename, datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(extract_path, filename)))) for filename in files_list] exclude_list = sorted(date_list, key=lambda x: x[1])[:len(files_list) - max_files] for filename, _ in exclude_list: try: os.remove(os.path.join(extract_path, filename)) except: pass #---------------------------------------------------------------------------------------------------------------------------------- def upload_extracts(dataset_id, db_config, cloud_parms, max_files=MAX_EXTRACT_FILES): """ Upload extracts of process to cloud """ log.info(f"Upload extracts dataset: {dataset_id}") comp_lib = importlib.import_module(f'eyeflow_sdk.cloud_store.{cloud_parms['provider']}') cloud_obj = comp_lib.Connector(**cloud_parms) def generate_extract_thumbs(extract_path): """ Generate thumb image for all image files in extract folder """ thumbs_list = [fname for fname in os.listdir(extract_path) if fname.endswith('_thumb.jpg')] for filename in os.listdir(extract_path): file_thumb = filename[:-4] + "_thumb.jpg" if filename.endswith('.jpg') and filename not in thumbs_list and file_thumb not in thumbs_list: img = cv2.imread(os.path.join(extract_path, filename)) if max(img.shape) > THUMB_SIZE: img, _ = resize_image_scale(img, THUMB_SIZE) cv2.imwrite(os.path.join(extract_path, file_thumb), img) def save_extract_list(extract_path): """ Save a json with info about all files in extract folder """ files_data = [] files_list = [] files_time = [] file_list = [f for f in os.listdir(extract_path)] for filename in file_list: exp_id = filename[:24] if filename.endswith('_data.json') and (exp_id + ".jpg") in file_list and (exp_id + "_thumb.jpg") in file_list: try: filepath = os.path.join(extract_path, filename) with open(filepath, 'r') as json_file: data = json.load(json_file) files_data.append(data) if 'date' in data: file_time = data['date'] else: file_time = datetime.datetime.fromtimestamp(os.path.getmtime(filepath)).strftime("%Y-%m-%d %H:%M:%S.%f") files_list.append(filename) files_time.append(file_time) except: pass cloud_files = cloud_obj.list_files_info(folder="extract", resource_id=dataset_id) file_list = [f["filename"] for f in cloud_files] for cloud_file in cloud_files: exp_id = cloud_file["filename"][:24] if cloud_file["filename"].endswith('_data.json') \ and cloud_file["filename"] not in files_list \ and (exp_id + ".jpg") in file_list \ and (exp_id + "_thumb.jpg") in file_list: try: data = json.loads(cloud_obj.download_file(folder="extract", resource_id=dataset_id, filename=cloud_file["filename"])) files_data.append(data) if 'date' in data: file_time = data['date'] else: file_time = cloud_file["creation_date"].strftime("%Y-%m-%d %H:%M:%S.%f") files_list.append(filename) files_time.append(file_time) except: pass extract_files = { "files_data": files_data, "extract_list": 
sorted(zip(files_list, files_time), key=lambda x: x[1], reverse=True) } # save extract info in storage with open(os.path.join(extract_path, 'extract_files.json'), 'w', newline='', encoding='utf8') as file_p: json.dump(extract_files, file_p, ensure_ascii=False, default=str) # save extract info in database client = MongoClient(db_config["db_url"]) db_mongo = client[db_config["db_name"]] db_mongo.extract.delete_one({"_id": ObjectId(dataset_id)}) extract_files["_id"] = ObjectId(dataset_id) db_mongo.extract.insert_one(extract_files) file_ac = FileAccess(storage="extract", resource_id=dataset_id, cloud_parms=cloud_parms) # clear_log(file_ac.get_local_folder()) file_ac.purge_files(max_files=max_files) generate_extract_thumbs(file_ac.get_local_folder()) file_ac.sync_files(origin="local") save_extract_list(file_ac.get_local_folder()) #---------------------------------------------------------------------------------------------------------------------------------- class VideoLog(object): def __init__(self, dataset_id, vlog_size): self._vlog_size = vlog_size file_ac = FileAccess(storage="extract", resource_id=dataset_id) self._dest_path = file_ac.get_local_folder() self._dataset_id = dataset_id self._last_log = datetime.datetime(2000, 1, 1) def log_batch(self, image_batch, output_batch, annotations): for idx, image in enumerate(image_batch): if random.random() < float(self._vlog_size): obj_id = str(ObjectId()) filename = obj_id + '.jpg' file_thumb = filename[:-4] + "_thumb.jpg" if isinstance(image, dict): cv2.imwrite(os.path.join(self._dest_path, filename), image["input_image"]) file_stat_img = os.stat(os.path.join(self._dest_path, filename)) if max(image["input_image"].shape) > THUMB_SIZE: img, _ = resize_image_scale(image["input_image"], THUMB_SIZE) cv2.imwrite(os.path.join(self._dest_path, file_thumb), img) else: cv2.imwrite(os.path.join(self._dest_path, file_thumb), image["input_image"]) file_stat_thumb = os.stat(os.path.join(self._dest_path, file_thumb)) img_data = { "_id": obj_id, "date": pytz.utc.localize(datetime.datetime.now()), "img_height": image["input_image"].shape[0], "img_width": image["input_image"].shape[1], "file_size": file_stat_img.st_size, "thumb_size": file_stat_thumb.st_size, "annotations": annotations[idx] } if 'frame_time' in image["frame_data"]: img_data['frame_time'] = image["frame_data"]['frame_time'] if 'video_data' in image["frame_data"]: img_data['video_data'] = image["frame_data"]['video_data'] else: cv2.imwrite(os.path.join(self._dest_path, filename), image[0]) file_stat_img = os.stat(os.path.join(self._dest_path, filename)) if max(image[0].shape) > THUMB_SIZE: img, _ = resize_image_scale(image[0], THUMB_SIZE) cv2.imwrite(os.path.join(self._dest_path, file_thumb), img) else: cv2.imwrite(os.path.join(self._dest_path, file_thumb), image[0]) file_stat_thumb = os.stat(os.path.join(self._dest_path, file_thumb)) img_data = { "_id": obj_id, "date": pytz.utc.localize(datetime.datetime.now()), "img_height": image[0].shape[0], "img_width": image[0].shape[1], "file_size": file_stat_img.st_size, "thumb_size": file_stat_thumb.st_size, "annotations": annotations[idx] } if 'frame_time' in image[2]: img_data['frame_time'] = image[2]['frame_time'] if 'video_file' in image[2]: img_data['video_file'] = image[2]['video_file'] with open(os.path.join(self._dest_path, obj_id + '_data.json'), 'w', newline='', encoding='utf8') as file_p: json.dump(img_data, file_p, ensure_ascii=False, default=str) if (datetime.datetime.now() - self._last_log) > datetime.timedelta(minutes=1): 
clear_log(self._dest_path) self._last_log = datetime.datetime.now() #----------------------------------------------------------------------------------------------------------------------------------
""" SiliconLife Eyeflow Class for log batch of extracted images from detection Author: Alex Sobral de Freitas """ import os import json import datetime import pytz import random import cv2 import importlib from pymongo import MongoClient from bson import ObjectId from eyeflow_sdk.file_access import FileAccess from eyeflow_sdk.img_utils import resize_image_scale from eyeflow_sdk.log_obj import log #---------------------------------------------------------------------------------------------------------------------------------- MAX_EXTRACT_FILES = 800 THUMB_SIZE = 128 def clear_log(extract_path, max_files=MAX_EXTRACT_FILES): files_list = os.listdir(extract_path) if len(files_list) > max_files: date_list = [(filename, datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(extract_path, filename)))) for filename in files_list] exclude_list = sorted(date_list, key=lambda x: x[1])[:len(files_list) - max_files] for filename, _ in exclude_list: try: os.remove(os.path.join(extract_path, filename)) except: pass #---------------------------------------------------------------------------------------------------------------------------------- def upload_extracts(dataset_id, db_config, cloud_parms, max_files=MAX_EXTRACT_FILES): """ Upload extracts of process to cloud """ log.info(f"Upload extracts dataset: {dataset_id}") comp_lib = importlib.import_module(f'eyeflow_sdk.cloud_store.{cloud_parms["provider"]}') cloud_obj = comp_lib.Connector(**cloud_parms) def generate_extract_thumbs(extract_path): """ Generate thumb image for all image files in extract folder """ thumbs_list = [fname for fname in os.listdir(extract_path) if fname.endswith('_thumb.jpg')] for filename in os.listdir(extract_path): file_thumb = filename[:-4] + "_thumb.jpg" if filename.endswith('.jpg') and filename not in thumbs_list and file_thumb not in thumbs_list: img = cv2.imread(os.path.join(extract_path, filename)) if max(img.shape) > THUMB_SIZE: img, _ = resize_image_scale(img, THUMB_SIZE) cv2.imwrite(os.path.join(extract_path, file_thumb), img) def save_extract_list(extract_path): """ Save a json with info about all files in extract folder """ files_data = [] files_list = [] files_time = [] file_list = [f for f in os.listdir(extract_path)] for filename in file_list: exp_id = filename[:24] if filename.endswith('_data.json') and (exp_id + ".jpg") in file_list and (exp_id + "_thumb.jpg") in file_list: try: filepath = os.path.join(extract_path, filename) with open(filepath, 'r') as json_file: data = json.load(json_file) files_data.append(data) if 'date' in data: file_time = data['date'] else: file_time = datetime.datetime.fromtimestamp(os.path.getmtime(filepath)).strftime("%Y-%m-%d %H:%M:%S.%f") files_list.append(filename) files_time.append(file_time) except: pass cloud_files = cloud_obj.list_files_info(folder="extract", resource_id=dataset_id) file_list = [f["filename"] for f in cloud_files] for cloud_file in cloud_files: exp_id = cloud_file["filename"][:24] if cloud_file["filename"].endswith('_data.json') \ and cloud_file["filename"] not in files_list \ and (exp_id + ".jpg") in file_list \ and (exp_id + "_thumb.jpg") in file_list: try: data = json.loads(cloud_obj.download_file(folder="extract", resource_id=dataset_id, filename=cloud_file["filename"])) files_data.append(data) if 'date' in data: file_time = data['date'] else: file_time = cloud_file["creation_date"].strftime("%Y-%m-%d %H:%M:%S.%f") files_list.append(filename) files_time.append(file_time) except: pass extract_files = { "files_data": files_data, "extract_list": 
sorted(zip(files_list, files_time), key=lambda x: x[1], reverse=True) } # save extract info in storage with open(os.path.join(extract_path, 'extract_files.json'), 'w', newline='', encoding='utf8') as file_p: json.dump(extract_files, file_p, ensure_ascii=False, default=str) # save extract info in database client = MongoClient(db_config["db_url"]) db_mongo = client[db_config["db_name"]] db_mongo.extract.delete_one({"_id": ObjectId(dataset_id)}) extract_files["_id"] = ObjectId(dataset_id) db_mongo.extract.insert_one(extract_files) file_ac = FileAccess(storage="extract", resource_id=dataset_id, cloud_parms=cloud_parms) # clear_log(file_ac.get_local_folder()) file_ac.purge_files(max_files=max_files) generate_extract_thumbs(file_ac.get_local_folder()) file_ac.sync_files(origin="local") save_extract_list(file_ac.get_local_folder()) #---------------------------------------------------------------------------------------------------------------------------------- class VideoLog(object): def __init__(self, dataset_id, vlog_size): self._vlog_size = vlog_size file_ac = FileAccess(storage="extract", resource_id=dataset_id) self._dest_path = file_ac.get_local_folder() self._dataset_id = dataset_id self._last_log = datetime.datetime(2000, 1, 1) def log_batch(self, image_batch, output_batch, annotations): for idx, image in enumerate(image_batch): if random.random() < float(self._vlog_size): obj_id = str(ObjectId()) filename = obj_id + '.jpg' file_thumb = filename[:-4] + "_thumb.jpg" if isinstance(image, dict): cv2.imwrite(os.path.join(self._dest_path, filename), image["input_image"]) file_stat_img = os.stat(os.path.join(self._dest_path, filename)) if max(image["input_image"].shape) > THUMB_SIZE: img, _ = resize_image_scale(image["input_image"], THUMB_SIZE) cv2.imwrite(os.path.join(self._dest_path, file_thumb), img) else: cv2.imwrite(os.path.join(self._dest_path, file_thumb), image["input_image"]) file_stat_thumb = os.stat(os.path.join(self._dest_path, file_thumb)) img_data = { "_id": obj_id, "date": pytz.utc.localize(datetime.datetime.now()), "img_height": image["input_image"].shape[0], "img_width": image["input_image"].shape[1], "file_size": file_stat_img.st_size, "thumb_size": file_stat_thumb.st_size, "annotations": annotations[idx] } if 'frame_time' in image["frame_data"]: img_data['frame_time'] = image["frame_data"]['frame_time'] if 'video_data' in image["frame_data"]: img_data['video_data'] = image["frame_data"]['video_data'] else: cv2.imwrite(os.path.join(self._dest_path, filename), image[0]) file_stat_img = os.stat(os.path.join(self._dest_path, filename)) if max(image[0].shape) > THUMB_SIZE: img, _ = resize_image_scale(image[0], THUMB_SIZE) cv2.imwrite(os.path.join(self._dest_path, file_thumb), img) else: cv2.imwrite(os.path.join(self._dest_path, file_thumb), image[0]) file_stat_thumb = os.stat(os.path.join(self._dest_path, file_thumb)) img_data = { "_id": obj_id, "date": pytz.utc.localize(datetime.datetime.now()), "img_height": image[0].shape[0], "img_width": image[0].shape[1], "file_size": file_stat_img.st_size, "thumb_size": file_stat_thumb.st_size, "annotations": annotations[idx] } if 'frame_time' in image[2]: img_data['frame_time'] = image[2]['frame_time'] if 'video_file' in image[2]: img_data['video_file'] = image[2]['video_file'] with open(os.path.join(self._dest_path, obj_id + '_data.json'), 'w', newline='', encoding='utf8') as file_p: json.dump(img_data, file_p, ensure_ascii=False, default=str) if (datetime.datetime.now() - self._last_log) > datetime.timedelta(minutes=1): 
clear_log(self._dest_path) self._last_log = datetime.datetime.now() #----------------------------------------------------------------------------------------------------------------------------------
--- row 6: edited_code ---

import json

with open('sunless.dat', 'w') as f:
    for name in ['areas', 'events', 'exchanges', 'personas', 'qualities']:
        with open(f'{name}_import.json') as g:
            data = json.loads(g.read())
            for line in data:
                entry = {'key': f'{name}:{line['Id']}', 'value': line}
                f.write(f"{json.dumps(entry)}\n")
    with open('settings.json') as g:
        f.write(g.read())
--- row 6: original_code ---

import json

with open('sunless.dat', 'w') as f:
    for name in ['areas', 'events', 'exchanges', 'personas', 'qualities']:
        with open(f'{name}_import.json') as g:
            data = json.loads(g.read())
            for line in data:
                entry = {'key': f'{name}:{line["Id"]}', 'value': line}
                f.write(f"{json.dumps(entry)}\n")
    with open('settings.json') as g:
        f.write(g.read())
--- row 7: edited_code ---

import os
import shutil
import subprocess

from cookiecutter.main import cookiecutter
from pathlib import Path

import click
import yaml

from ctfcli.utils.challenge import (
    create_challenge,
    lint_challenge,
    load_challenge,
    load_installed_challenges,
    sync_challenge,
)
from ctfcli.utils.config import (
    get_base_path,
    get_config_path,
    get_project_path,
    load_config,
)
from ctfcli.utils.spec import (
    CHALLENGE_SPEC_DOCS,
    blank_challenge_spec,
)


class Challenge(object):
    def new(self, type):
        path = Path(get_base_path())
        if os.sep not in type:
            type += os.sep + "default"
        path = path / "templates" / type
        cookiecutter(str(path))

    def add(self, repo):
        config = load_config()

        if repo.endswith(".git"):
            # Get relative path from project root to current directory
            challenge_path = Path(os.path.relpath(os.getcwd(), get_project_path()))

            # Get new directory that will exist after clone
            base_repo_path = Path(os.path.basename(repo).rsplit(".", maxsplit=1)[0])

            # Join targets
            challenge_path = challenge_path / base_repo_path
            print(challenge_path)

            config["challenges"][str(challenge_path)] = repo

            with open(get_config_path(), "w+") as f:
                config.write(f)

            subprocess.call(["git", "clone", "--depth", "1", repo])
            shutil.rmtree(str(base_repo_path / ".git"))
        elif Path(repo).exists():
            config["challenges"][repo] = repo
            with open(get_config_path(), "w+") as f:
                config.write(f)
        else:
            click.secho(
                "Couldn't process that challenge path. Please check it for errors.",
                fg="red",
            )

    def restore(self, challenge=None):
        config = load_config()
        challenges = dict(config["challenges"])
        for folder, url in challenges.items():
            if url.endswith(".git"):
                if challenge is not None and folder != challenge:
                    continue
                click.echo(f"Cloning {url} to {folder}")
                subprocess.call(["git", "clone", "--depth", "1", url, folder])
                shutil.rmtree(str(Path(folder) / ".git"))
            else:
                click.echo(f"Skipping {url} - {folder}")

    def install(self, challenge=None, force=False):
        if challenge is None:
            challenge = os.getcwd()

        path = Path(challenge)
        if path.name.endswith(".yml") is False:
            path = path / "challenge.yml"

        click.secho(f"Found {path}")
        challenge = load_challenge(path)
        click.secho(f'Loaded {challenge['name']}', fg="yellow")

        installed_challenges = load_installed_challenges()
        for c in installed_challenges:
            if c["name"] == challenge["name"]:
                click.secho(
                    "Already found existing challenge with same name. Perhaps you meant sync instead of install?",
                    fg="red",
                )
                if force is True:
                    click.secho(
                        "Ignoring existing challenge because of --force", fg="yellow"
                    )
                else:
                    return

        click.secho(f'Installing {challenge['name']}', fg="yellow")
        create_challenge(challenge=challenge)
        click.secho(f"Success!", fg="green")

    def sync(self, challenge=None):
        if challenge is None:
            challenge = os.getcwd()

        path = Path(challenge)
        if path.name.endswith(".yml") is False:
            path = path / "challenge.yml"

        click.secho(f"Found {path}")
        challenge = load_challenge(path)
        click.secho(f'Loaded {challenge['name']}', fg="yellow")

        installed_challenges = load_installed_challenges()
        for c in installed_challenges:
            if c["name"] == challenge["name"]:
                break
        else:
            click.secho(
                "Couldn't find existing challenge. Perhaps you meant install instead of sync?",
                fg="red",
            )
            return  # nothing installed to sync against

        click.secho(f'Syncing {challenge['name']}', fg="yellow")
        sync_challenge(challenge=challenge)
        click.secho(f"Success!", fg="green")

    def update(self, challenge=None):
        config = load_config()
        challenges = dict(config["challenges"])
        for folder, url in challenges.items():
            if challenge and challenge != folder:
                continue
            if url.endswith(".git"):
                click.echo(f"Cloning {url} to {folder}")
                subprocess.call(["git", "init"], cwd=folder)
                subprocess.call(["git", "remote", "add", "origin", url], cwd=folder)
                subprocess.call(["git", "add", "-A"], cwd=folder)
                subprocess.call(
                    ["git", "commit", "-m", "Persist local changes (ctfcli)"],
                    cwd=folder,
                )
                subprocess.call(
                    ["git", "pull", "--allow-unrelated-histories", "origin", "master"],
                    cwd=folder,
                )
                subprocess.call(["git", "mergetool"], cwd=folder)
                subprocess.call(["git", "clean", "-f"], cwd=folder)
                subprocess.call(["git", "commit", "--no-edit"], cwd=folder)
                shutil.rmtree(str(Path(folder) / ".git"))
            else:
                click.echo(f"Skipping {url} - {folder}")

    def finalize(self, challenge=None):
        if challenge is None:
            challenge = os.getcwd()

        path = Path(challenge)
        spec = blank_challenge_spec()
        for k in spec:
            q = CHALLENGE_SPEC_DOCS.get(k)
            fields = q._asdict()

            ask = False
            required = fields.pop("required", False)
            if required is False:
                try:
                    ask = click.confirm(f"Would you like to add the {k} field?")
                    if ask is False:
                        continue
                except click.Abort:
                    click.echo("\n")
                    continue

            if ask is True:
                fields["text"] = "\t" + fields["text"]

            multiple = fields.pop("multiple", False)
            if multiple:
                fields["text"] += " (Ctrl-C to continue)"
                spec[k] = []
                try:
                    while True:
                        r = click.prompt(**fields)
                        spec[k].append(r)
                except click.Abort:
                    click.echo("\n")
            else:
                try:
                    r = click.prompt(**fields)
                    spec[k] = r
                except click.Abort:
                    click.echo("\n")

        with open(path / "challenge.yml", "w+") as f:
            yaml.dump(spec, stream=f, default_flow_style=False, sort_keys=False)
        print("challenge.yml written to", path / "challenge.yml")

    def lint(self, challenge=None):
        if challenge is None:
            challenge = os.getcwd()

        path = Path(challenge)
        if path.name.endswith(".yml") is False:
            path = path / "challenge.yml"

        lint_challenge(path)
--- row 7: original_code ---

import os
import shutil
import subprocess

from cookiecutter.main import cookiecutter
from pathlib import Path

import click
import yaml

from ctfcli.utils.challenge import (
    create_challenge,
    lint_challenge,
    load_challenge,
    load_installed_challenges,
    sync_challenge,
)
from ctfcli.utils.config import (
    get_base_path,
    get_config_path,
    get_project_path,
    load_config,
)
from ctfcli.utils.spec import (
    CHALLENGE_SPEC_DOCS,
    blank_challenge_spec,
)


class Challenge(object):
    def new(self, type):
        path = Path(get_base_path())
        if os.sep not in type:
            type += os.sep + "default"
        path = path / "templates" / type
        cookiecutter(str(path))

    def add(self, repo):
        config = load_config()

        if repo.endswith(".git"):
            # Get relative path from project root to current directory
            challenge_path = Path(os.path.relpath(os.getcwd(), get_project_path()))

            # Get new directory that will exist after clone
            base_repo_path = Path(os.path.basename(repo).rsplit(".", maxsplit=1)[0])

            # Join targets
            challenge_path = challenge_path / base_repo_path
            print(challenge_path)

            config["challenges"][str(challenge_path)] = repo

            with open(get_config_path(), "w+") as f:
                config.write(f)

            subprocess.call(["git", "clone", "--depth", "1", repo])
            shutil.rmtree(str(base_repo_path / ".git"))
        elif Path(repo).exists():
            config["challenges"][repo] = repo
            with open(get_config_path(), "w+") as f:
                config.write(f)
        else:
            click.secho(
                "Couldn't process that challenge path. Please check it for errors.",
                fg="red",
            )

    def restore(self, challenge=None):
        config = load_config()
        challenges = dict(config["challenges"])
        for folder, url in challenges.items():
            if url.endswith(".git"):
                if challenge is not None and folder != challenge:
                    continue
                click.echo(f"Cloning {url} to {folder}")
                subprocess.call(["git", "clone", "--depth", "1", url, folder])
                shutil.rmtree(str(Path(folder) / ".git"))
            else:
                click.echo(f"Skipping {url} - {folder}")

    def install(self, challenge=None, force=False):
        if challenge is None:
            challenge = os.getcwd()

        path = Path(challenge)
        if path.name.endswith(".yml") is False:
            path = path / "challenge.yml"

        click.secho(f"Found {path}")
        challenge = load_challenge(path)
        click.secho(f'Loaded {challenge["name"]}', fg="yellow")

        installed_challenges = load_installed_challenges()
        for c in installed_challenges:
            if c["name"] == challenge["name"]:
                click.secho(
                    "Already found existing challenge with same name. Perhaps you meant sync instead of install?",
                    fg="red",
                )
                if force is True:
                    click.secho(
                        "Ignoring existing challenge because of --force", fg="yellow"
                    )
                else:
                    return

        click.secho(f'Installing {challenge["name"]}', fg="yellow")
        create_challenge(challenge=challenge)
        click.secho(f"Success!", fg="green")

    def sync(self, challenge=None):
        if challenge is None:
            challenge = os.getcwd()

        path = Path(challenge)
        if path.name.endswith(".yml") is False:
            path = path / "challenge.yml"

        click.secho(f"Found {path}")
        challenge = load_challenge(path)
        click.secho(f'Loaded {challenge["name"]}', fg="yellow")

        installed_challenges = load_installed_challenges()
        for c in installed_challenges:
            if c["name"] == challenge["name"]:
                break
        else:
            click.secho(
                "Couldn't find existing challenge. Perhaps you meant install instead of sync?",
                fg="red",
            )
            return  # nothing installed to sync against

        click.secho(f'Syncing {challenge["name"]}', fg="yellow")
        sync_challenge(challenge=challenge)
        click.secho(f"Success!", fg="green")

    def update(self, challenge=None):
        config = load_config()
        challenges = dict(config["challenges"])
        for folder, url in challenges.items():
            if challenge and challenge != folder:
                continue
            if url.endswith(".git"):
                click.echo(f"Cloning {url} to {folder}")
                subprocess.call(["git", "init"], cwd=folder)
                subprocess.call(["git", "remote", "add", "origin", url], cwd=folder)
                subprocess.call(["git", "add", "-A"], cwd=folder)
                subprocess.call(
                    ["git", "commit", "-m", "Persist local changes (ctfcli)"],
                    cwd=folder,
                )
                subprocess.call(
                    ["git", "pull", "--allow-unrelated-histories", "origin", "master"],
                    cwd=folder,
                )
                subprocess.call(["git", "mergetool"], cwd=folder)
                subprocess.call(["git", "clean", "-f"], cwd=folder)
                subprocess.call(["git", "commit", "--no-edit"], cwd=folder)
                shutil.rmtree(str(Path(folder) / ".git"))
            else:
                click.echo(f"Skipping {url} - {folder}")

    def finalize(self, challenge=None):
        if challenge is None:
            challenge = os.getcwd()

        path = Path(challenge)
        spec = blank_challenge_spec()
        for k in spec:
            q = CHALLENGE_SPEC_DOCS.get(k)
            fields = q._asdict()

            ask = False
            required = fields.pop("required", False)
            if required is False:
                try:
                    ask = click.confirm(f"Would you like to add the {k} field?")
                    if ask is False:
                        continue
                except click.Abort:
                    click.echo("\n")
                    continue

            if ask is True:
                fields["text"] = "\t" + fields["text"]

            multiple = fields.pop("multiple", False)
            if multiple:
                fields["text"] += " (Ctrl-C to continue)"
                spec[k] = []
                try:
                    while True:
                        r = click.prompt(**fields)
                        spec[k].append(r)
                except click.Abort:
                    click.echo("\n")
            else:
                try:
                    r = click.prompt(**fields)
                    spec[k] = r
                except click.Abort:
                    click.echo("\n")

        with open(path / "challenge.yml", "w+") as f:
            yaml.dump(spec, stream=f, default_flow_style=False, sort_keys=False)
        print("challenge.yml written to", path / "challenge.yml")

    def lint(self, challenge=None):
        if challenge is None:
            challenge = os.getcwd()

        path = Path(challenge)
        if path.name.endswith(".yml") is False:
            path = path / "challenge.yml"

        lint_challenge(path)
--- row 8: edited_code ---

import eodslib
import os
from pathlib import Path
import pytest


@pytest.mark.skip_real()
def test_create(set_output_dir, modify_id_list, unique_run_string):
    output_dir = set_output_dir
    conn = {
        'domain': os.getenv("HOST"),
        'username': os.getenv("API_USER"),
        'access_token': os.getenv("API_TOKEN"),
    }
    eods_params = {
        'output_dir':output_dir,
        'title':'keep_api_test_create_group',
        'verify': False,
        #'limit':1,
    }

    list_of_layers, _ = eodslib.query_catalog(conn, **eods_params)
    os.rename(
        output_dir / 'eods-query-all-results.csv',
        output_dir / 'eods-query-all-results-create-group-test.csv'
    )

    errors = []

    response_json = eodslib.create_layer_group(
        conn,
        list_of_layers,
        'eodslib-create-layer-test-' + unique_run_string,
        abstract='some description of the layer group ' + unique_run_string
    )

    if not modify_id_list:
        modify_id_list.append(response_json['id'])

    lower_unique_run_string = unique_run_string.lower().replace('-', '_').replace('/', '').replace(':', '')

    # content checks
    if len(response_json['layers']) != 1:
        errors.append(f"Content Error: \'layers\' in response text should contain only 1 layers, got {len(response_json["layers"])} layers")

    if response_json['layers'][0] != 'geonode:keep_api_test_create_group':
        errors.append(f"Content Error: 1st layer of \'layers\' in response text should be \'geonode:keep_api_test_create_group\', it was \'{response_json["layers"][0]}\'")

    if response_json['abstract'] != 'some description of the layer group ' + unique_run_string:
        errors.append(f"Content Error: \'abstract\' in response text should be \'some description of the layer group {unique_run_string}\", it was \"{response_json["abstract"]}\'")

    if response_json['alternate'] != 'geonode:eodslib_create_layer_test_' + lower_unique_run_string:
        errors.append(f"Content Error: \'alternate\' in response text should be \'geonode:eodslib_create_layer_test_{lower_unique_run_string}\", it was \"{response_json["alternate"]}\'")

    if response_json['name'] != 'eodslib_create_layer_test_' + lower_unique_run_string:
        errors.append(f"Content Error: \'name\' in response text should be \'eodslib_create_layer_test_{lower_unique_run_string}\", it was \"{response_json["name"]}\'")

    if response_json['title'] != 'eodslib-create-layer-test-' + unique_run_string:
        errors.append(f"Content Error: \'title\' in response text should be \'eodslib-create-layer-test-{unique_run_string}\", it was \"{response_json["title"]}\'")

    assert not errors
import eodslib import os from pathlib import Path import pytest import os @pytest.mark.skip_real() def test_create(set_output_dir, modify_id_list, unique_run_string): output_dir = set_output_dir conn = { 'domain': os.getenv("HOST"), 'username': os.getenv("API_USER"), 'access_token': os.getenv("API_TOKEN"), } eods_params = { 'output_dir':output_dir, 'title':'keep_api_test_create_group', 'verify': False, #'limit':1, } list_of_layers, _ = eodslib.query_catalog(conn, **eods_params) os.rename(output_dir / 'eods-query-all-results.csv', output_dir / 'eods-query-all-results-create-group-test.csv') errors = [] response_json = eodslib.create_layer_group( conn, list_of_layers, 'eodslib-create-layer-test-' + unique_run_string, abstract='some description of the layer group ' + unique_run_string ) if not modify_id_list: modify_id_list.append(response_json['id']) lower_unique_run_string = unique_run_string.lower().replace('-', '_').replace('/', '').replace(':', '') # content checks if len(response_json['layers']) != 1: errors.append(f"Content Error: \'layers\' in response text should contain only 1 layers, got {len(response_json['layers'])} layers") if response_json['layers'][0] != 'geonode:keep_api_test_create_group': errors.append(f"Content Error: 1st layer of \'layers\' in response text should be \'geonode:keep_api_test_create_group\', it was \'{response_json['layers'][0]}\'") if response_json['abstract'] != 'some description of the layer group ' + unique_run_string: errors.append(f"Content Error: \'abstract\' in response text should be \'some description of the layer group {unique_run_string}\', it was \'{response_json['abstract']}\'") if response_json['alternate'] != 'geonode:eodslib_create_layer_test_' + lower_unique_run_string: errors.append(f"Content Error: \'alternate\' in response text should be \'geonode:eodslib_create_layer_test_{lower_unique_run_string}\', it was \'{response_json['alternate']}\'") if response_json['name'] != 'eodslib_create_layer_test_' + lower_unique_run_string: errors.append(f"Content Error: \'name\' in response text should be \'eodslib_create_layer_test_{lower_unique_run_string}\', it was \'{response_json['name']}\'") if response_json['title'] != 'eodslib-create-layer-test-' + unique_run_string: errors.append(f"Content Error: \'title\' in response text should be \'eodslib-create-layer-test-{unique_run_string}\', it was \'{response_json['title']}\'") assert not errors
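Both variants of this test use the same soft-assertion pattern: every content check appends a message to `errors`, and a single `assert not errors` at the end reports all mismatches in one run instead of stopping at the first failure. A minimal self-contained sketch of the pattern (the payload and field names here are illustrative, not part of eodslib):

def test_response_fields():
    response = {"name": "demo", "layers": ["a", "b"]}  # stand-in payload
    errors = []
    if response["name"] != "demo":
        errors.append(f"name was {response['name']!r}")
    if len(response["layers"]) != 1:
        errors.append(f"expected 1 layer, got {len(response['layers'])}")
    # A single assert at the end surfaces every collected mismatch together
    assert not errors, "\n".join(errors)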
""" Author: Roman Slyusar (https://github.com/Roman-R2) Сортировщик файлов (например фотографий) Проблематика: скопилось много папок с фотографиями, в том числе и копий фотографий на разных дисках, скопированных с нескольких независимых источников в разное время. Требования: 1. Отсортировать файлы по датам: -. Определить разрешенные форматы и исходную папку. а. Пройтись по файлам и подпапкам в указанной папке. б. Исключить дублирование фотографий в. Для каждого разрешенного формата создать отдельную папку г. Положить копии файлов в папку, в названии которой будет указана дата файла. д. Вывести статистику работы программы """ import os import shutil import time from datetime import datetime from dotenv import load_dotenv from tqdm import tqdm load_dotenv() # Папка с исходными, неотсортированными файлами UNSORTED_FOLDER = os.getenv('UNSORTED_FOLDER') # Папка для результата выполнения скрипта TARGET_FOLDER = os.getenv('TARGET_FOLDER') # Разрешенные для сортировки форматы ALLOWED_FORMATS = tuple(os.getenv('ALLOWED_FORMATS').split(',')) print(ALLOWED_FORMATS) def get_file_id(create_date, bytes_size) -> str: """Вернет уникальный идентификатор для файла""" return f"{create_date.strftime("%Y%m%d%H%M%S")}{bytes_size}" def get_data_folder_name(file_create_date) -> str: """Вернет строку для имени папки, содержащей дату создания фала""" return file_create_date.strftime('%Y-%m-%d') def str_to_time(str_time: str): return datetime.strptime(str_time, '%a %b %d %H:%M:%S %Y') class PhotoSorter: STAT_FILE_FOR_DUPLICATES = 'stat_duplicate_files.txt' STAT_FILE_FOR_UNPROCESSED = 'stat_unprocessed_files.txt' def __init__( self, unsorted_folder: str, target_folder: str, allowed_formats: tuple ): self._unsorted_folder = unsorted_folder self._target_folder = target_folder self._allowed_formats = allowed_formats self.__unprocessed_files = [] self.__duplicate_files = [] self.__shadow_folder_struct = {} def start(self): """ Главный метод-обработчик :return: None """ self._prepare_folder() self._prepare_files_for_statistics() self._walk_the_files() self._store_stat_duplicate_files() self._store_stat_unprocessed_files() self._say_end() def _prepare_files_for_statistics(self): """Подготовит новые файлы для статистики или очистит пред идущие""" with open( os.path.join( self._target_folder, self.STAT_FILE_FOR_DUPLICATES ), 'w' ) as fd: pass with open( os.path.join( self._target_folder, self.STAT_FILE_FOR_UNPROCESSED ), 'w' ) as fd: pass def _store_stat_duplicate_files(self): """Сохранит информацию о найденных дубликатах файлов в специальный файл статистики""" self._add_info_to_file( filename=self.STAT_FILE_FOR_DUPLICATES, list_of_files=self.__duplicate_files, default_str='Дубликатов файлов не найдено...' ) def _store_stat_unprocessed_files(self): """Сохранит информацию о найденных дубликатах файлов в специальный файл статистики""" self._add_info_to_file( filename=self.STAT_FILE_FOR_UNPROCESSED, list_of_files=self.__unprocessed_files, default_str='Нет необработанных файлов...' ) def _add_info_to_file( self, filename: str, list_of_files: list, default_str: str = '---' ): """Сохранит информацю о статистике в указанный файл.""" with open( os.path.join(self._target_folder, filename), 'a' ) as fd: if not self.__duplicate_files: fd.writelines(default_str) else: for count, file in enumerate(list_of_files, start=1): fd.writelines(f'{count}.\t{file}\n') def _say_end(self): """Выведет сообщение об окончании работы""" print(f"-----------------------------------------------------------") print(f"---------- Сортировка файлов окончена! 
--------------------") print(f"-----------------------------------------------------------") def _prepare_folder(self): """ Подготовит структуру папок в папке вывода результата """ # Если папка существует, то удалим ее со всеми вложенными файлами if os.path.exists(self._target_folder): shutil.rmtree(self._target_folder) # Создадим папку и структуру подпапок заново os.mkdir(self._target_folder) for file_format in self._allowed_formats: os.mkdir(os.path.join(self._target_folder, file_format)) self.__shadow_folder_struct[file_format] = {} print("Структура папок подготовлена.") def _walk_the_files(self): """Пройдется по файлам в заданной папке""" for root, dirs, files in os.walk(self._unsorted_folder): if files: print(f"Обработка папки {root}") time.sleep(1) for file in tqdm(files): full_path_to_file = os.path.join(root, file) filename, file_extension = os.path.splitext( full_path_to_file) file_extension = file_extension.lower() if file_extension in self._allowed_formats: self._process_file(full_path_to_file, file_extension) else: self.__unprocessed_files.append(full_path_to_file) def _show_unprocessed_files(self): """Выведет в консоль файлы, которые не подошли по формату""" if not self.__unprocessed_files: print("Все файлы были обработаны...") else: print("Не обработанные фалы: ") for count, file in enumerate(self.__unprocessed_files, start=1): print(f"\t{count}. {file}") def _show_duplicate_files(self): """Выведет в консоль файлы, которые оказались идентичны уже скопированным в папку файлам по ряду параметров (дата создания и размер)""" if not self.__duplicate_files: print("Дубликатов файлов не найдено...") else: print("Дубликаты фалов: ") for count, file in enumerate(self.__duplicate_files, start=1): print(f"\t{count}. {file}") def _process_file(self, full_path_to_file, file_extension): """Обработает файл в соответствии с логикой""" file_creation_time = str_to_time( time.ctime(os.path.getmtime(full_path_to_file)) ) file_size_in_bytes = os.path.getsize(full_path_to_file) folder_name_for_file = get_data_folder_name(file_creation_time) folder_for_save = os.path.join( self._target_folder, file_extension, folder_name_for_file ) # Создадим папку для файла, если ее еще нет if not os.path.exists(folder_for_save): os.mkdir(folder_for_save) # Запишем папку в структуру для сравнения файлов self.__shadow_folder_struct[file_extension][ folder_name_for_file ] = [] # Получим уникальный ключ файла file_id = get_file_id( file_creation_time, file_size_in_bytes ) # Проверим файл на идентичность уже скопированным файлам по ряду # параметров (дата создания и размер) if file_id not in self.__shadow_folder_struct[file_extension][ folder_name_for_file]: # Добавим идентификатор файла в структуру для сравнения self.__shadow_folder_struct[file_extension][ folder_name_for_file].append(file_id) # Скопируем файл с его атрибутами в нужную папку shutil.copy2(full_path_to_file, folder_for_save) else: # Добавим файл в список дубликатов self.__duplicate_files.append(full_path_to_file) if __name__ == '__main__': photo_sorter_obj = PhotoSorter( UNSORTED_FOLDER, TARGET_FOLDER, ALLOWED_FORMATS ) photo_sorter_obj.start()
""" Author: Roman Slyusar (https://github.com/Roman-R2) Сортировщик файлов (например фотографий) Проблематика: скопилось много папок с фотографиями, в том числе и копий фотографий на разных дисках, скопированных с нескольких независимых источников в разное время. Требования: 1. Отсортировать файлы по датам: -. Определить разрешенные форматы и исходную папку. а. Пройтись по файлам и подпапкам в указанной папке. б. Исключить дублирование фотографий в. Для каждого разрешенного формата создать отдельную папку г. Положить копии файлов в папку, в названии которой будет указана дата файла. д. Вывести статистику работы программы """ import os import shutil import time from datetime import datetime from dotenv import load_dotenv from tqdm import tqdm load_dotenv() # Папка с исходными, неотсортированными файлами UNSORTED_FOLDER = os.getenv('UNSORTED_FOLDER') # Папка для результата выполнения скрипта TARGET_FOLDER = os.getenv('TARGET_FOLDER') # Разрешенные для сортировки форматы ALLOWED_FORMATS = tuple(os.getenv('ALLOWED_FORMATS').split(',')) print(ALLOWED_FORMATS) def get_file_id(create_date, bytes_size) -> str: """Вернет уникальный идентификатор для файла""" return f"{create_date.strftime('%Y%m%d%H%M%S')}{bytes_size}" def get_data_folder_name(file_create_date) -> str: """Вернет строку для имени папки, содержащей дату создания фала""" return file_create_date.strftime('%Y-%m-%d') def str_to_time(str_time: str): return datetime.strptime(str_time, '%a %b %d %H:%M:%S %Y') class PhotoSorter: STAT_FILE_FOR_DUPLICATES = 'stat_duplicate_files.txt' STAT_FILE_FOR_UNPROCESSED = 'stat_unprocessed_files.txt' def __init__( self, unsorted_folder: str, target_folder: str, allowed_formats: tuple ): self._unsorted_folder = unsorted_folder self._target_folder = target_folder self._allowed_formats = allowed_formats self.__unprocessed_files = [] self.__duplicate_files = [] self.__shadow_folder_struct = {} def start(self): """ Главный метод-обработчик :return: None """ self._prepare_folder() self._prepare_files_for_statistics() self._walk_the_files() self._store_stat_duplicate_files() self._store_stat_unprocessed_files() self._say_end() def _prepare_files_for_statistics(self): """Подготовит новые файлы для статистики или очистит пред идущие""" with open( os.path.join( self._target_folder, self.STAT_FILE_FOR_DUPLICATES ), 'w' ) as fd: pass with open( os.path.join( self._target_folder, self.STAT_FILE_FOR_UNPROCESSED ), 'w' ) as fd: pass def _store_stat_duplicate_files(self): """Сохранит информацию о найденных дубликатах файлов в специальный файл статистики""" self._add_info_to_file( filename=self.STAT_FILE_FOR_DUPLICATES, list_of_files=self.__duplicate_files, default_str='Дубликатов файлов не найдено...' ) def _store_stat_unprocessed_files(self): """Сохранит информацию о найденных дубликатах файлов в специальный файл статистики""" self._add_info_to_file( filename=self.STAT_FILE_FOR_UNPROCESSED, list_of_files=self.__unprocessed_files, default_str='Нет необработанных файлов...' ) def _add_info_to_file( self, filename: str, list_of_files: list, default_str: str = '---' ): """Сохранит информацю о статистике в указанный файл.""" with open( os.path.join(self._target_folder, filename), 'a' ) as fd: if not self.__duplicate_files: fd.writelines(default_str) else: for count, file in enumerate(list_of_files, start=1): fd.writelines(f'{count}.\t{file}\n') def _say_end(self): """Выведет сообщение об окончании работы""" print(f"-----------------------------------------------------------") print(f"---------- Сортировка файлов окончена! 
--------------------") print(f"-----------------------------------------------------------") def _prepare_folder(self): """ Подготовит структуру папок в папке вывода результата """ # Если папка существует, то удалим ее со всеми вложенными файлами if os.path.exists(self._target_folder): shutil.rmtree(self._target_folder) # Создадим папку и структуру подпапок заново os.mkdir(self._target_folder) for file_format in self._allowed_formats: os.mkdir(os.path.join(self._target_folder, file_format)) self.__shadow_folder_struct[file_format] = {} print("Структура папок подготовлена.") def _walk_the_files(self): """Пройдется по файлам в заданной папке""" for root, dirs, files in os.walk(self._unsorted_folder): if files: print(f"Обработка папки {root}") time.sleep(1) for file in tqdm(files): full_path_to_file = os.path.join(root, file) filename, file_extension = os.path.splitext( full_path_to_file) file_extension = file_extension.lower() if file_extension in self._allowed_formats: self._process_file(full_path_to_file, file_extension) else: self.__unprocessed_files.append(full_path_to_file) def _show_unprocessed_files(self): """Выведет в консоль файлы, которые не подошли по формату""" if not self.__unprocessed_files: print("Все файлы были обработаны...") else: print("Не обработанные фалы: ") for count, file in enumerate(self.__unprocessed_files, start=1): print(f"\t{count}. {file}") def _show_duplicate_files(self): """Выведет в консоль файлы, которые оказались идентичны уже скопированным в папку файлам по ряду параметров (дата создания и размер)""" if not self.__duplicate_files: print("Дубликатов файлов не найдено...") else: print("Дубликаты фалов: ") for count, file in enumerate(self.__duplicate_files, start=1): print(f"\t{count}. {file}") def _process_file(self, full_path_to_file, file_extension): """Обработает файл в соответствии с логикой""" file_creation_time = str_to_time( time.ctime(os.path.getmtime(full_path_to_file)) ) file_size_in_bytes = os.path.getsize(full_path_to_file) folder_name_for_file = get_data_folder_name(file_creation_time) folder_for_save = os.path.join( self._target_folder, file_extension, folder_name_for_file ) # Создадим папку для файла, если ее еще нет if not os.path.exists(folder_for_save): os.mkdir(folder_for_save) # Запишем папку в структуру для сравнения файлов self.__shadow_folder_struct[file_extension][ folder_name_for_file ] = [] # Получим уникальный ключ файла file_id = get_file_id( file_creation_time, file_size_in_bytes ) # Проверим файл на идентичность уже скопированным файлам по ряду # параметров (дата создания и размер) if file_id not in self.__shadow_folder_struct[file_extension][ folder_name_for_file]: # Добавим идентификатор файла в структуру для сравнения self.__shadow_folder_struct[file_extension][ folder_name_for_file].append(file_id) # Скопируем файл с его атрибутами в нужную папку shutil.copy2(full_path_to_file, folder_for_save) else: # Добавим файл в список дубликатов self.__duplicate_files.append(full_path_to_file) if __name__ == '__main__': photo_sorter_obj = PhotoSorter( UNSORTED_FOLDER, TARGET_FOLDER, ALLOWED_FORMATS ) photo_sorter_obj.start()
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json from unittest import mock import pytest from airflow.providers.google.cloud.hooks.datafusion import DataFusionHook from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id API_VERSION = "v1beta1" GCP_CONN_ID = "google_cloud_default" HOOK_STR = "airflow.providers.google.cloud.hooks.datafusion.{}" LOCATION = "test-location" INSTANCE_NAME = "airflow-test-instance" INSTANCE = {"type": "BASIC", "displayName": INSTANCE_NAME} PROJECT_ID = "test_project_id" PIPELINE_NAME = "shrubberyPipeline" PIPELINE = {"test": "pipeline"} INSTANCE_URL = "http://datafusion.instance.com" RUNTIME_ARGS = {"arg1": "a", "arg2": "b"} @pytest.fixture def hook(): with mock.patch( "airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__", new=mock_base_gcp_hook_default_project_id, ): yield DataFusionHook(gcp_conn_id=GCP_CONN_ID) class TestDataFusionHook: @staticmethod def mock_endpoint(get_conn_mock): return get_conn_mock.return_value.projects.return_value.locations.return_value.instances.return_value def test_name(self, hook): expected = f"projects/{PROJECT_ID}/locations/{LOCATION}/instances/{INSTANCE_NAME}" assert hook._name(PROJECT_ID, LOCATION, INSTANCE_NAME) == expected def test_parent(self, hook): expected = f"projects/{PROJECT_ID}/locations/{LOCATION}" assert hook._parent(PROJECT_ID, LOCATION) == expected @mock.patch(HOOK_STR.format("build")) @mock.patch(HOOK_STR.format("DataFusionHook._authorize")) def test_get_conn(self, mock_authorize, mock_build, hook): mock_authorize.return_value = "test" hook.get_conn() mock_build.assert_called_once_with("datafusion", hook.api_version, http="test", cache_discovery=False) @mock.patch(HOOK_STR.format("DataFusionHook.get_conn")) def test_restart_instance(self, get_conn_mock, hook): method_mock = self.mock_endpoint(get_conn_mock).restart method_mock.return_value.execute.return_value = "value" result = hook.restart_instance(instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID) assert result == "value" method_mock.assert_called_once_with(name=hook._name(PROJECT_ID, LOCATION, INSTANCE_NAME)) @mock.patch(HOOK_STR.format("DataFusionHook.get_conn")) def test_delete_instance(self, get_conn_mock, hook): method_mock = self.mock_endpoint(get_conn_mock).delete method_mock.return_value.execute.return_value = "value" result = hook.delete_instance(instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID) assert result == "value" method_mock.assert_called_once_with(name=hook._name(PROJECT_ID, LOCATION, INSTANCE_NAME)) @mock.patch(HOOK_STR.format("DataFusionHook.get_conn")) def test_create_instance(self, get_conn_mock, hook): method_mock = self.mock_endpoint(get_conn_mock).create method_mock.return_value.execute.return_value = 
"value" result = hook.create_instance( instance_name=INSTANCE_NAME, instance=INSTANCE, location=LOCATION, project_id=PROJECT_ID, ) assert result == "value" method_mock.assert_called_once_with( parent=hook._parent(PROJECT_ID, LOCATION), body=INSTANCE, instanceId=INSTANCE_NAME, ) @mock.patch(HOOK_STR.format("DataFusionHook.get_conn")) def test_patch_instance(self, get_conn_mock, hook): method_mock = self.mock_endpoint(get_conn_mock).patch method_mock.return_value.execute.return_value = "value" result = hook.patch_instance( instance_name=INSTANCE_NAME, instance=INSTANCE, update_mask="instance.name", location=LOCATION, project_id=PROJECT_ID, ) assert result == "value" method_mock.assert_called_once_with( name=hook._name(PROJECT_ID, LOCATION, INSTANCE_NAME), body=INSTANCE, updateMask="instance.name", ) @mock.patch(HOOK_STR.format("DataFusionHook.get_conn")) def test_get_instance(self, get_conn_mock, hook): method_mock = self.mock_endpoint(get_conn_mock).get method_mock.return_value.execute.return_value = "value" result = hook.get_instance(instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID) assert result == "value" method_mock.assert_called_once_with(name=hook._name(PROJECT_ID, LOCATION, INSTANCE_NAME)) @mock.patch("google.auth.transport.requests.Request") @mock.patch(HOOK_STR.format("DataFusionHook._get_credentials")) def test_cdap_request(self, get_credentials_mock, mock_request, hook): url = "test_url" headers = {"Content-Type": "application/json"} method = "POST" request = mock_request.return_value request.return_value = mock.MagicMock() body = {"data": "value"} result = hook._cdap_request(url=url, method=method, body=body) mock_request.assert_called_once_with() get_credentials_mock.assert_called_once_with() get_credentials_mock.return_value.before_request.assert_called_once_with( request=request, method=method, url=url, headers=headers ) request.assert_called_once_with(method=method, url=url, headers=headers, body=json.dumps(body)) assert result == request.return_value @mock.patch(HOOK_STR.format("DataFusionHook._cdap_request")) def test_create_pipeline(self, mock_request, hook): mock_request.return_value.status = 200 hook.create_pipeline(pipeline_name=PIPELINE_NAME, pipeline=PIPELINE, instance_url=INSTANCE_URL) mock_request.assert_called_once_with( url=f"{INSTANCE_URL}/v3/namespaces/default/apps/{PIPELINE_NAME}", method="PUT", body=PIPELINE, ) @mock.patch(HOOK_STR.format("DataFusionHook._cdap_request")) def test_delete_pipeline(self, mock_request, hook): mock_request.return_value.status = 200 hook.delete_pipeline(pipeline_name=PIPELINE_NAME, instance_url=INSTANCE_URL) mock_request.assert_called_once_with( url=f"{INSTANCE_URL}/v3/namespaces/default/apps/{PIPELINE_NAME}", method="DELETE", body=None, ) @mock.patch(HOOK_STR.format("DataFusionHook._cdap_request")) def test_list_pipelines(self, mock_request, hook): data = {"data": "test"} mock_request.return_value.status = 200 mock_request.return_value.data = json.dumps(data) result = hook.list_pipelines(instance_url=INSTANCE_URL) mock_request.assert_called_once_with( url=f"{INSTANCE_URL}/v3/namespaces/default/apps", method="GET", body=None ) assert result == data @mock.patch(HOOK_STR.format("DataFusionHook._cdap_request")) def test_start_pipeline(self, mock_request, hook): run_id = 1234 mock_request.return_value = mock.MagicMock(status=200, data=f'[{{'runId':{run_id}}}]') hook.start_pipeline(pipeline_name=PIPELINE_NAME, instance_url=INSTANCE_URL, runtime_args=RUNTIME_ARGS) body = [ { "appId": PIPELINE_NAME, "programType": 
"workflow", "programId": "DataPipelineWorkflow", "runtimeargs": RUNTIME_ARGS, } ] mock_request.assert_called_once_with( url=f"{INSTANCE_URL}/v3/namespaces/default/start", method="POST", body=body ) @mock.patch(HOOK_STR.format("DataFusionHook._cdap_request")) def test_stop_pipeline(self, mock_request, hook): mock_request.return_value.status = 200 hook.stop_pipeline(pipeline_name=PIPELINE_NAME, instance_url=INSTANCE_URL) mock_request.assert_called_once_with( url=f"{INSTANCE_URL}/v3/namespaces/default/apps/{PIPELINE_NAME}/" f"workflows/DataPipelineWorkflow/stop", method="POST", )
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json from unittest import mock import pytest from airflow.providers.google.cloud.hooks.datafusion import DataFusionHook from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id API_VERSION = "v1beta1" GCP_CONN_ID = "google_cloud_default" HOOK_STR = "airflow.providers.google.cloud.hooks.datafusion.{}" LOCATION = "test-location" INSTANCE_NAME = "airflow-test-instance" INSTANCE = {"type": "BASIC", "displayName": INSTANCE_NAME} PROJECT_ID = "test_project_id" PIPELINE_NAME = "shrubberyPipeline" PIPELINE = {"test": "pipeline"} INSTANCE_URL = "http://datafusion.instance.com" RUNTIME_ARGS = {"arg1": "a", "arg2": "b"} @pytest.fixture def hook(): with mock.patch( "airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__", new=mock_base_gcp_hook_default_project_id, ): yield DataFusionHook(gcp_conn_id=GCP_CONN_ID) class TestDataFusionHook: @staticmethod def mock_endpoint(get_conn_mock): return get_conn_mock.return_value.projects.return_value.locations.return_value.instances.return_value def test_name(self, hook): expected = f"projects/{PROJECT_ID}/locations/{LOCATION}/instances/{INSTANCE_NAME}" assert hook._name(PROJECT_ID, LOCATION, INSTANCE_NAME) == expected def test_parent(self, hook): expected = f"projects/{PROJECT_ID}/locations/{LOCATION}" assert hook._parent(PROJECT_ID, LOCATION) == expected @mock.patch(HOOK_STR.format("build")) @mock.patch(HOOK_STR.format("DataFusionHook._authorize")) def test_get_conn(self, mock_authorize, mock_build, hook): mock_authorize.return_value = "test" hook.get_conn() mock_build.assert_called_once_with("datafusion", hook.api_version, http="test", cache_discovery=False) @mock.patch(HOOK_STR.format("DataFusionHook.get_conn")) def test_restart_instance(self, get_conn_mock, hook): method_mock = self.mock_endpoint(get_conn_mock).restart method_mock.return_value.execute.return_value = "value" result = hook.restart_instance(instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID) assert result == "value" method_mock.assert_called_once_with(name=hook._name(PROJECT_ID, LOCATION, INSTANCE_NAME)) @mock.patch(HOOK_STR.format("DataFusionHook.get_conn")) def test_delete_instance(self, get_conn_mock, hook): method_mock = self.mock_endpoint(get_conn_mock).delete method_mock.return_value.execute.return_value = "value" result = hook.delete_instance(instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID) assert result == "value" method_mock.assert_called_once_with(name=hook._name(PROJECT_ID, LOCATION, INSTANCE_NAME)) @mock.patch(HOOK_STR.format("DataFusionHook.get_conn")) def test_create_instance(self, get_conn_mock, hook): method_mock = self.mock_endpoint(get_conn_mock).create method_mock.return_value.execute.return_value = 
"value" result = hook.create_instance( instance_name=INSTANCE_NAME, instance=INSTANCE, location=LOCATION, project_id=PROJECT_ID, ) assert result == "value" method_mock.assert_called_once_with( parent=hook._parent(PROJECT_ID, LOCATION), body=INSTANCE, instanceId=INSTANCE_NAME, ) @mock.patch(HOOK_STR.format("DataFusionHook.get_conn")) def test_patch_instance(self, get_conn_mock, hook): method_mock = self.mock_endpoint(get_conn_mock).patch method_mock.return_value.execute.return_value = "value" result = hook.patch_instance( instance_name=INSTANCE_NAME, instance=INSTANCE, update_mask="instance.name", location=LOCATION, project_id=PROJECT_ID, ) assert result == "value" method_mock.assert_called_once_with( name=hook._name(PROJECT_ID, LOCATION, INSTANCE_NAME), body=INSTANCE, updateMask="instance.name", ) @mock.patch(HOOK_STR.format("DataFusionHook.get_conn")) def test_get_instance(self, get_conn_mock, hook): method_mock = self.mock_endpoint(get_conn_mock).get method_mock.return_value.execute.return_value = "value" result = hook.get_instance(instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID) assert result == "value" method_mock.assert_called_once_with(name=hook._name(PROJECT_ID, LOCATION, INSTANCE_NAME)) @mock.patch("google.auth.transport.requests.Request") @mock.patch(HOOK_STR.format("DataFusionHook._get_credentials")) def test_cdap_request(self, get_credentials_mock, mock_request, hook): url = "test_url" headers = {"Content-Type": "application/json"} method = "POST" request = mock_request.return_value request.return_value = mock.MagicMock() body = {"data": "value"} result = hook._cdap_request(url=url, method=method, body=body) mock_request.assert_called_once_with() get_credentials_mock.assert_called_once_with() get_credentials_mock.return_value.before_request.assert_called_once_with( request=request, method=method, url=url, headers=headers ) request.assert_called_once_with(method=method, url=url, headers=headers, body=json.dumps(body)) assert result == request.return_value @mock.patch(HOOK_STR.format("DataFusionHook._cdap_request")) def test_create_pipeline(self, mock_request, hook): mock_request.return_value.status = 200 hook.create_pipeline(pipeline_name=PIPELINE_NAME, pipeline=PIPELINE, instance_url=INSTANCE_URL) mock_request.assert_called_once_with( url=f"{INSTANCE_URL}/v3/namespaces/default/apps/{PIPELINE_NAME}", method="PUT", body=PIPELINE, ) @mock.patch(HOOK_STR.format("DataFusionHook._cdap_request")) def test_delete_pipeline(self, mock_request, hook): mock_request.return_value.status = 200 hook.delete_pipeline(pipeline_name=PIPELINE_NAME, instance_url=INSTANCE_URL) mock_request.assert_called_once_with( url=f"{INSTANCE_URL}/v3/namespaces/default/apps/{PIPELINE_NAME}", method="DELETE", body=None, ) @mock.patch(HOOK_STR.format("DataFusionHook._cdap_request")) def test_list_pipelines(self, mock_request, hook): data = {"data": "test"} mock_request.return_value.status = 200 mock_request.return_value.data = json.dumps(data) result = hook.list_pipelines(instance_url=INSTANCE_URL) mock_request.assert_called_once_with( url=f"{INSTANCE_URL}/v3/namespaces/default/apps", method="GET", body=None ) assert result == data @mock.patch(HOOK_STR.format("DataFusionHook._cdap_request")) def test_start_pipeline(self, mock_request, hook): run_id = 1234 mock_request.return_value = mock.MagicMock(status=200, data=f'[{{"runId":{run_id}}}]') hook.start_pipeline(pipeline_name=PIPELINE_NAME, instance_url=INSTANCE_URL, runtime_args=RUNTIME_ARGS) body = [ { "appId": PIPELINE_NAME, "programType": 
"workflow", "programId": "DataPipelineWorkflow", "runtimeargs": RUNTIME_ARGS, } ] mock_request.assert_called_once_with( url=f"{INSTANCE_URL}/v3/namespaces/default/start", method="POST", body=body ) @mock.patch(HOOK_STR.format("DataFusionHook._cdap_request")) def test_stop_pipeline(self, mock_request, hook): mock_request.return_value.status = 200 hook.stop_pipeline(pipeline_name=PIPELINE_NAME, instance_url=INSTANCE_URL) mock_request.assert_called_once_with( url=f"{INSTANCE_URL}/v3/namespaces/default/apps/{PIPELINE_NAME}/" f"workflows/DataPipelineWorkflow/stop", method="POST", )
import threading from typing import List, Dict import requests class User: def __init__(self, user_json: dict): self.raw = user_json self.id: int = user_json['id'] self.is_bot: bool = user_json['is_bot'] if 'last_name' in user_json: self.name = f"{user_json['first_name']} {user_json['last_name']}" else: self.name = user_json['first_name'] if 'username' in user_json: self.username: str = user_json['username'] self.link = 't.me/' + self.username else: self.username = '' self.link = '' class Bot(User): def __init__(self, config: dict): self.config = config self.token: str = self.config['token'] self.base_url = 'https://api.telegram.org/bot' + self.token + '/' if 'proxy' in self.config and self.config['proxy']['enable']: self.proxy_kw = {'proxies': {'https': self.config['proxy']['proxy_url']}} else: self.proxy_kw = {} get_me_resp: dict = requests.get(self.base_url + 'getMe', **self.proxy_kw).json() if not get_me_resp['ok']: raise APIError('Bot initialization failed. ' + get_me_resp['description']) super().__init__(get_me_resp['result']) self.can_join_groups: bool = get_me_resp['result']['can_join_groups'] self.can_read_all_group_messages: bool = get_me_resp['result']['can_read_all_group_messages'] self.supports_inline_queries: bool = get_me_resp['result']['supports_inline_queries'] self.msg_tasks = [] self.query_tasks = [] self.member_status_tasks = [] def api(self, action: str, data: dict): resp = requests.post(self.base_url + action, json=data, **self.proxy_kw).json() if not resp['ok']: raise APIError(f'API request "{action}" failed. {resp["description"]}') return resp['result'] def get_updates(self, offset: int = 0, timeout: int = 60) -> list: update_data = {'offset': offset, 'timeout': timeout, 'allowed_updates': [ # Accept all updates, but only some of them are available in catbot 'message', # Available 'edited_message', 'channel_post', 'edited_channel_post', 'inline_query', 'chosen_inline_result', 'callback_query', # Available 'shipping_query', 'pre_checkout_query', 'poll', 'poll_answer', 'my_chat_member', 'chat_member' # Available ]} updates = self.api('getUpdates', update_data) print(updates) return updates def add_msg_task(self, criteria, action, **action_kw): """ Add tasks for the bot to process. For message updates only. Use add_query_task for callback query updates. :param criteria: A function that leads the flow of the program into the "action" function. It should take a Message-like object as the only argument and return a bool. When it returns True, "action" will be executed. An example is to return True if the message starts with "/start", which is the standard start of a private chat with a user. :param action: A function to be executed when criteria returns True. Typically it is the response to the user's action. It should take a Message-like object as the only positional argument and accept keyword arguments. Arguments in action_kw will be passed to it. :param action_kw: Keyword arguments that will be passed to action when it is called. :return: """ self.msg_tasks.append((criteria, action, action_kw)) def add_query_task(self, criteria, action, **action_kw): """ Similar to add_msg_task, but adds criteria and action for callback queries, typically clicks on in-message buttons (I would like to call them in-message instead of inline, which is the term Telegram uses). """ self.query_tasks.append((criteria, action, action_kw)) def add_member_status_task(self, criteria, action, **action_kw): """ Similar to add_msg_task, but adds criteria and action for chat member updates. 
""" self.member_status_tasks.append((criteria, action, action_kw)) def start(self): old_updates = self.get_updates(timeout=0) update_offset = old_updates[-1]['update_id'] + 1 if old_updates else 0 while True: try: updates = self.get_updates(update_offset) except APIError as e: print(e.args[0]) continue for item in updates: update_offset = item['update_id'] + 1 if 'message' in item.keys(): msg = Message(item['message']) for criteria, action, action_kw in self.msg_tasks: if criteria(msg): threading.Thread(target=action, args=(msg,), kwargs=action_kw).start() elif 'callback_query' in item.keys(): query = CallbackQuery(item['callback_query']) if not hasattr(query, 'msg'): continue for criteria, action, action_kw in self.query_tasks: if criteria(query): threading.Thread(target=action, args=(query,), kwargs=action_kw).start() elif 'chat_member' in item.keys(): member_update = ChatMemberUpdate(item['chat_member']) for criteria, action, action_kw in self.member_status_tasks: if criteria(member_update): threading.Thread(target=action, args=(member_update,), kwargs=action_kw).start() else: continue def send_message(self, chat_id, **kw): """ :param chat_id: Unique identifier for the target chat or username of the target channel :param kw: Keyword arguments defined in Telegram bot api. See https://core.telegram.org/bots/api#sendmessage<br> General keywords:<br> - parse_mode: Optional. Should be one of MarkdownV2 or HTML or Markdown.<br> - disable_web_page_preview: Optional. Should be True or False. Disables link previews for links in this message.<br> - disable_notification: Optional. Should be True or False. Sends the message silently. Users will receive a notification with no sound.<br> - reply_to_message_id: Optional. If the message is a reply, ID of the original message.<br> - allow_sending_without_reply: Optional. Pass True, if the message should be sent even if the specified replied-to message is not found<br> For plain text messages:<br> - text: Text of the message to be sent, 1-4096 characters after entities parsing.<br> - reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. A common content of this param is an InlineKeyboard object.<br> :return: """ if 'reply_markup' in kw.keys(): kw['reply_markup'] = kw['reply_markup'].parse() msg_kw = {'chat_id': chat_id, **kw} return Message(self.api('sendMessage', msg_kw)) def edit_message(self, chat_id, msg_id, **kw): if 'reply_markup' in kw.keys(): kw['reply_markup'] = kw['reply_markup'].parse() msg_kw = {'chat_id': chat_id, 'message_id': msg_id, **kw} try: return Message(self.api('editMessageText', msg_kw)) except APIError as e: if 'message is not modified' in e.args[0]: pass else: raise def forward_message(self, from_chat_id, to_chat_id, msg_id: int, disable_notification=False): """ :param from_chat_id: Unique identifier for the chat where the original message was sent :param to_chat_id: Unique identifier for the target chat or username of the target channel :param msg_id: Message identifier in the chat specified in from_chat_id :param disable_notification: Optional. Sends the message silently. Users will receive a notification with no sound. :return: The forwarded message. 
""" return Message(self.api('forwardMessage', {'from_chat_id': from_chat_id, 'chat_id': to_chat_id, 'message_id': msg_id, 'disable_notification': disable_notification})) def answer_callback_query(self, callback_query_id, **kwargs) -> bool: """ :param callback_query_id: callback_query_id you receive in callback_query :param kwargs: Keyword arguments defined in Telegram bot api. You should always call this method after receiving a valid callback_query, even if you have nothing to send back to user. See https://core.telegram.org/bots/api#answercallbackquery - text: Optional. Text of the notification. If not specified, nothing will be shown to the user, 0-200 characters. - show_alert: Optional. If true, an alert will be shown by the client instead of a notification at the top of the chat screen. Defaults to false. - cache_time: Optional. The maximum amount of time in seconds that the result of the callback query may be cached client-side. Telegram apps will support caching starting in version 3.14. Defaults to 0. :return: """ return self.api('answerCallbackQuery', {'callback_query_id': callback_query_id, **kwargs}) def get_chat(self, chat_id: int): try: chat = Chat(self.api('getChat', {'chat_id': chat_id})) except APIError as e: if e.args[0] == 'Bad Request: chat not found': raise ChatNotFoundError else: raise else: return chat def get_chat_member(self, chat_id: int, user_id: int): """ Typically, use this method to build a ChatMember object. :param chat_id: ID of the chat that the ChatMember object will belong to. :param user_id: ID of the target user. :return: A ChatMember object, including info about permissions granted to the user in a specific chat. """ try: chat_member = ChatMember(self.api('getChatMember', {'chat_id': chat_id, 'user_id': user_id}), chat_id) except APIError as e: if 'Bad Request: user not found' in e.args[0]: raise UserNotFoundError else: raise else: return chat_member def restrict_chat_member(self, chat_id: int, user_id: int, until: int = 5, **permissions) -> bool: """ :param chat_id: Unique identifier for the target chat or username of the target supergroup :param user_id: Unique identifier of the target user :param until: Optional. Time when restrictions will be lifted for the user, unix time. If user is restricted for more than 366 days or less than 30 seconds from the current time, they are considered to be restricted forever. Default: Forever :param permissions: Chat permissions defined in Telegram bot api. Left blank to restrict all actions except reading. See https://core.telegram.org/bots/api#chatpermissions - can_send_messages: Optional. True, if the user is allowed to send text messages, contacts, locations and venues - can_send_media_messages: Optional. True, if the user is allowed to send audios, documents, photos, videos, video notes and voice notes, implies can_send_messages - can_send_polls: Optional. True, if the user is allowed to send polls, implies can_send_messages - can_send_other_messages: Optional. True, if the user is allowed to send animations, games, stickers and use inline bots, implies can_send_media_messages - can_add_web_page_previews: Optional. True, if the user is allowed to add web page previews to their messages, implies can_send_media_messages - can_change_info: Optional. True, if the user is allowed to change the chat title, photo and other settings. Ignored in public supergroups - can_invite_users: Optional. True, if the user is allowed to invite new users to the chat - can_pin_messages: Optional. 
True, if the user is allowed to pin messages. Ignored in public supergroups :return: Return True on success, otherwise raise exception. """ try: result = self.api('restrictChatMember', {'chat_id': chat_id, 'user_id': user_id, 'until_date': until, 'permissions': permissions}) except APIError as e: if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]: raise InsufficientRightError elif 'Bad Request: user not found' in e.args[0]: raise UserNotFoundError elif 'Bad Request: user is an administrator' in e.args[0] or \ 'Bad Request: can\'t remove chat owner' in e.args[0] or \ 'Bad Request: not enough rights' in e.args[0]: raise RestrictAdminError else: raise else: return result def silence_chat_member(self, chat_id: int, user_id: int, until: int = 5) -> bool: """ Remove can_send_messages permission from specified user. :param chat_id: Unique identifier for the target chat or username of the target supergroup :param user_id: Unique identifier of the target user :param until: Optional. Time when restrictions will be lifted for the user, unix time. If user is restricted for more than 366 days or less than 30 seconds from the current time, they are considered to be restricted forever. Default: Forever :return: Return True on success, otherwise raise exception. """ try: result = self.api('restrictChatMember', {'chat_id': chat_id, 'user_id': user_id, 'until_date': until, 'permissions': {'can_send_messages': False}}) except APIError as e: if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]: raise InsufficientRightError elif 'Bad Request: user not found' in e.args[0]: raise UserNotFoundError elif 'Bad Request: user is an administrator' in e.args[0] or \ 'Bad Request: can\'t remove chat owner' in e.args[0] or \ 'Bad Request: not enough rights' in e.args[0]: raise RestrictAdminError else: raise else: return result def lift_restrictions(self, chat_id: int, user_id: int) -> bool: """ Lift all restrictions on specified user. :param chat_id: Unique identifier for the target chat or username of the target supergroup :param user_id: Unique identifier of the target user :return: Return True on success, otherwise raise exception. """ try: result = self.api('restrictChatMember', {'chat_id': chat_id, 'user_id': user_id, 'permissions': {'can_send_messages': True, 'can_send_media_messages': True, 'can_send_polls': True, 'can_send_other_messages': True, 'can_add_web_page_previews': True, 'can_change_info': True, 'can_invite_users': True, 'can_pin_messages': True}}) except APIError as e: if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]: raise InsufficientRightError elif 'Bad Request: user not found' in e.args[0]: raise UserNotFoundError elif 'Bad Request: user is an administrator' in e.args[0] or \ 'Bad Request: can\'t remove chat owner' in e.args[0] or \ 'Bad Request: not enough rights' in e.args[0]: raise RestrictAdminError else: raise else: return result def kick_chat_member(self, chat_id: int, user_id: int, until: int = 0, no_ban: bool = False) -> bool: """ Kick chat member out. See https://core.telegram.org/bots/api#kickchatmember :param chat_id: Unique identifier for the target chat or username of the target supergroup :param user_id: Unique identifier of the target user :param until: Optional, default 0 (infinite ban). Date when the user will be unbanned, unix time. 
If user is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever :param no_ban: Kick out and then allow the user to join or send messages (from channel or somewhere else) :return: Return True on success, otherwise raise exception. """ try: if no_ban: # That the way Telegram API acts result = self.api('unbanChatMember', {'chat_id': chat_id, 'user_id': user_id}) else: result = self.api('kickChatMember', {'chat_id': chat_id, 'user_id': user_id, 'until_date': until}) except APIError as e: if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]: raise InsufficientRightError elif 'Bad Request: user not found' in e.args[0]: raise UserNotFoundError elif 'Bad Request: user is an administrator' in e.args[0] or \ 'Bad Request: can\'t remove chat owner' in e.args[0] or \ 'Bad Request: not enough rights' in e.args[0]: raise RestrictAdminError else: raise else: return result def unban_chat_member(self, chat_id: int, user_id: int) -> bool: """ Unban a banned user. See https://core.telegram.org/bots/api#unbanchatmember :param chat_id: Unique identifier for the target chat or username of the target supergroup :param user_id: Unique identifier of the target user """ try: result = self.api('unbanChatMember', {'chat_id': chat_id, 'user_id': user_id, 'only_if_banned': True}) except APIError as e: if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]: raise InsufficientRightError elif 'Bad Request: user not found' in e.args[0]: raise UserNotFoundError elif 'Bad Request: user is an administrator' in e.args[0] or \ 'Bad Request: can\'t remove chat owner' in e.args[0] or \ 'Bad Request: not enough rights' in e.args[0]: raise RestrictAdminError else: raise else: return result def delete_message(self, chat_id: int, msg_id: int) -> bool: try: result = self.api('deleteMessage', {'chat_id': chat_id, 'message_id': msg_id}) except APIError as e: if 'Bad Request: message to delete not found' in e.args[0] or \ 'Bad Request: message can\'t be deleted' in e.args[0]: raise DeleteMessageError else: raise else: return result class ChatMember(User): def __init__(self, member_json: dict, chat_id: int): """ Typically, build a ChatMember object from Bot.get_chat_member() method, which automatically get corresponding Chat object. :param member_json: Raw response from "getChatMember" API :param chat_id: ID of the chat which this ChatMember belongs to. 
""" super().__init__(member_json['user']) self.raw = f'{{'chat_member': {member_json}, "chat_id": {chat_id}}}' self.chat_id: int = chat_id # Can be “creator”, “administrator”, “member”, “restricted”, “left” or “kicked” self.status: str = member_json['status'] if self.status == 'administrator' or self.status == 'creator': self.is_anonymous: str = member_json['is_anonymous'] if self.status == 'administrator': self.can_be_edited: bool = member_json['can_be_edited'] self.can_delete_messages: bool = member_json['can_delete_messages'] self.can_promote_members: bool = member_json['can_promote_members'] if self.status == 'administrator' or self.status == 'restricted': self.can_change_info: bool = member_json['can_change_info'] self.can_invite_users: bool = member_json['can_invite_users'] self.can_pin_messages: bool = member_json['can_pin_messages'] if self.status == 'restricted': self.until_date: int = member_json['until_date'] self.is_member: bool = member_json['is_member'] self.can_send_messages: bool = member_json['can_send_messages'] self.can_send_media_messages: bool = member_json['can_send_media_messages'] self.can_send_polls: bool = member_json['can_send_polls'] self.can_send_other_messages: bool = member_json['can_send_other_messages'] # sticker, gif and inline bot self.can_add_web_page_previews: bool = member_json['can_add_web_page_previews'] # "embed links" in client if self.status == 'kicked': self.until_date: int = member_json['until_date'] if 'custom_title' in member_json.keys(): self.custom_title: str = member_json['custom_title'] def __str__(self): return self.raw class Message: def __init__(self, msg_json: dict): self.raw = msg_json self.chat = Chat(msg_json['chat']) self.id: int = msg_json['message_id'] # Empty for message in channels if 'from' in msg_json.keys(): self.from_ = User(msg_json['from']) if str(self.chat.id).startswith('-100'): self.link = f't.me/c/{str(self.chat.id).replace('-100', '')}/{self.id}' else: self.link = '' # The channel itself for channel messages. The supergroup itself for messages from anonymous group # administrators. 
The linked channel for messages automatically forwarded to the discussion group if 'sender_chat' in msg_json.keys(): self.sender_chat = Chat(msg_json['sender_chat']) self.date: int = msg_json['date'] # Signature of the post author for messages in channels, or the custom title of an anonymous group administrator if 'author_signature' in msg_json.keys(): self.author_signature: str = msg_json['author_signature'] if 'forward_from' in msg_json.keys(): # forwarded from users who allowed a link to their account in forwarded message self.forward_from = User(msg_json['forward_from']) self.forward = True elif 'forward_sender_name' in msg_json.keys(): # forwarded from users who disallowed a link to their account in forwarded message self.forward_sender_name: str = msg_json['forward_sender_name'] self.forward = True elif 'forward_from_message_id' in msg_json.keys(): # forwarded from channels self.forward_from_chat = Chat(msg_json['forward_from_chat']) self.forward_from_message_id: int = msg_json['forward_from_message_id'] if 'forward_signature' in msg_json.keys(): self.forward_signature: str = msg_json['forward_signature'] else: self.forward_signature = '' self.forward = True elif 'forward_from_chat' in msg_json.keys(): # forwarded from anonymous admins self.forward_from_chat = Chat(msg_json['forward_from_chat']) self.forward = True else: self.forward = False if self.forward: self.forward_date: int = msg_json['forward_date'] if 'reply_to_message' in msg_json.keys(): self.reply_to_message = Message(msg_json['reply_to_message']) self.reply = True else: self.reply = False if 'edit_date' in msg_json.keys(): self.edit_date: int = msg_json['edit_date'] self.edit = True else: self.edit = False if 'text' in msg_json.keys(): self.text: str = msg_json['text'] elif 'caption' in msg_json.keys(): self.text: str = msg_json['caption'] else: self.text: str = '' if 'new_chat_members' in msg_json.keys(): self.new_chat_members: List[User] = [] for user_json in msg_json['new_chat_members']: self.new_chat_members.append(User(user_json)) if 'left_chat_member' in msg_json.keys(): self.left_chat_member: User = User(msg_json['left_chat_member']) self.mentions = [] self.hashtags = [] self.cashtags = [] self.commands = [] self.links = [] self.bolds = [] self.italics = [] self.underlines = [] self.strikethroughs = [] self.codes = [] self.text_links = [] self.text_mention = [] self.html_formatted_text = self.text if 'entities' in msg_json.keys() or 'caption_entities' in msg_json.keys(): entity_type = 'entities' if 'entities' in msg_json.keys() else 'caption_entities' entity_to_be_formatted = [] for item in msg_json[entity_type]: offset = item['offset'] length = item['length'] if item['type'] == 'mention': self.mentions.append(self.text[offset:offset + length]) elif item['type'] == 'hashtag': self.hashtags.append(self.text[offset:offset + length]) elif item['type'] == 'cashtag': self.cashtags.append(self.text[offset:offset + length]) elif item['type'] == 'bot_command': self.commands.append(self.text[offset:offset + length]) elif item['type'] == 'url': self.links.append(self.text[offset:offset + length]) elif item['type'] == 'bold': self.bolds.append(self.text[offset:offset + length]) entity_to_be_formatted.append(item) elif item['type'] == 'italic': self.italics.append(self.text[offset:offset + length]) entity_to_be_formatted.append(item) elif item['type'] == 'underline': self.underlines.append(self.text[offset:offset + length]) entity_to_be_formatted.append(item) elif item['type'] == 'strikethrough': 
self.strikethroughs.append(self.text[offset:offset + length]) entity_to_be_formatted.append(item) elif item['type'] == 'code': self.codes.append(self.text[offset:offset + length]) entity_to_be_formatted.append(item) elif item['type'] == 'text_link': self.text_links.append((self.text[offset:offset + length], item['url'])) entity_to_be_formatted.append(item) elif item['type'] == 'text_mention': self.text_mention.append((self.text[offset:offset + length], User(item['user']))) entity_to_be_formatted.append(item) entity_to_be_formatted = sorted(entity_to_be_formatted, key=lambda x: x['offset'], reverse=True) for item in entity_to_be_formatted: offset = item['offset'] length = item['length'] if item['type'] == 'bold': self.html_formatted_text = self.text[:offset] + f'<b>{self.text[offset:offset + length]}</b>' + \ self.html_formatted_text[offset + length:] elif item['type'] == 'italic': self.html_formatted_text = self.text[:offset] + f'<i>{self.text[offset:offset + length]}</i>' + \ self.html_formatted_text[offset + length:] elif item['type'] == 'underline': self.html_formatted_text = self.text[:offset] + f'<u>{self.text[offset:offset + length]}</u>' + \ self.html_formatted_text[offset + length:] elif item['type'] == 'strikethrough': self.html_formatted_text = self.text[:offset] + f'<s>{self.text[offset:offset + length]}</s>' + \ self.html_formatted_text[offset + length:] elif item['type'] == 'code': self.html_formatted_text = self.text[:offset] + \ f'<code>{self.text[offset:offset + length]}</code>' + \ self.html_formatted_text[offset + length:] elif item['type'] == 'text_link': self.html_formatted_text = self.text[:offset] + f"<a href=\"{item['url']}\">" \ f"{self.text[offset:offset + length]}</a>" + \ self.html_formatted_text[offset + length:] elif item['type'] == 'text_mention': self.html_formatted_text = self.text[:offset] + f"<a href=\"tg://user?id={item['user']['id']}\">" \ f"{self.text[offset:offset + length]}</a>" + \ self.html_formatted_text[offset + length:] if 'dice' in msg_json.keys(): self.dice = True self.dice_emoji = msg_json['dice']['emoji'] self.dice_value = msg_json['dice']['value'] else: self.dice = False if 'reply_markup' in msg_json.keys(): self.reply_markup: InlineKeyboard = InlineKeyboard.from_json(msg_json['reply_markup']) def __str__(self): return self.raw class InlineKeyboardButton: def __init__(self, text: str, **kwargs): """ :param text: Text shown on the button. :param kwargs: Other optional params defined in Telegram bot api. See https://core.telegram.org/bots/api#inlinekeyboardbutton - url: Optional. HTTP or tg:// url to be opened when button is pressed - callback_data: Optional. Data to be sent in a callback query to the bot when button is pressed, 1-64 bytes """ self.text = text if len(kwargs) == 0: raise APIError('Inline keyboard button must have either url or callback_data.') if 'url' in kwargs.keys(): self.url = kwargs['url'] if 'callback_data' in kwargs.keys(): self.callback_data = kwargs['callback_data'] @classmethod def from_json(cls, button_json: dict): return cls(**button_json) def parse(self) -> dict: """ :return: self.__dict__ for follow-up usage like json serialization. """ return self.__dict__ class InlineKeyboard: def __init__(self, key_list: List[List[InlineKeyboardButton]]): """ :param key_list: Use InlineKeyboardButton to structure the buttons you want and pass it into this initializer. Each sublist represents a row. Buttons in the same sublist will be placed in the same row. 
""" self.key_list = key_list @classmethod def from_json(cls, markup_json: dict): markup_list: List[List[dict]] = markup_json['inline_keyboard'] key_list: List[List[InlineKeyboardButton]] = [] for i in range(len(markup_json)): key_list.append([]) for j in range(len(markup_json)): key_list[i].append(InlineKeyboardButton.from_json(markup_list[i][j])) return cls(key_list) def parse(self) -> Dict[str, List[List[Dict]]]: key_list: List[List[dict]] = [] for i in range(len(self.key_list)): key_list.append([]) for j in range(len(self.key_list[i])): key_list[i].append(self.key_list[i][j].parse()) return {'inline_keyboard': key_list} class CallbackQuery: def __init__(self, query_json: dict): self.raw = query_json self.id: str = query_json['id'] self.from_ = User(query_json['from']) if 'message' not in query_json.keys(): self.msg = '' else: self.msg = Message(query_json['message']) self.chat_instance: str = query_json['chat_instance'] if 'data' in query_json.keys(): self.data: str = query_json['data'] else: self.data = '' if 'inline_message_id' in query_json.keys(): self.inline_message_id: str = query_json['inline_message_id'] else: self.inline_message_id = '' def __str__(self): return self.raw class ChatMemberUpdate: def __init__(self, update_json: dict): self.raw = update_json self.chat = Chat(update_json['chat']) self.from_ = User(update_json['from']) self.date: int = update_json['date'] self.old_chat_member = ChatMember(update_json['old_chat_member'], self.chat.id) self.new_chat_member = ChatMember(update_json['new_chat_member'], self.chat.id) def __str__(self): return self.raw class Chat: def __init__(self, chat_json: dict): self.raw = chat_json self.id: int = chat_json['id'] self.type: str = chat_json['type'] if self.type == 'supergroup' or self.type == 'group' or self.type == 'channel': self.name: str = chat_json['title'] else: if 'last_name' in chat_json.keys(): self.name = f'{chat_json['first_name']} {chat_json['last_name']}' else: self.name = chat_json['first_name'] if 'username' in chat_json.keys(): self.username: str = chat_json['username'] self.link = 't.me/' + self.username else: self.username = '' self.link = '' # Returned by get_chat if 'bio' in chat_json.keys(): # If the chat is private chat self.bio: str = chat_json['bio'] if 'description' in chat_json.keys(): # If the chat is group, supergroup or channel self.description: str = chat_json['description'] if 'pinned_message' in chat_json.keys(): self.pinned_message = Message(chat_json['pinned_message']) if 'slow_mode_delay' in chat_json.keys(): # If the chat is supergroup self.slow_mode_delay: int = chat_json['slow_mode_delay'] if 'linked_chat_id' in chat_json.keys(): # If the supergroup or channel has a linked channel or supergroup, respectively self.linked_chat_id: int = chat_json['linked_chat_id'] def __str__(self): return self.raw class APIError(Exception): pass class UserNotFoundError(APIError): pass class ChatNotFoundError(APIError): pass class InsufficientRightError(APIError): pass class RestrictAdminError(APIError): pass class DeleteMessageError(APIError): pass
import threading
from typing import List, Dict

import requests


class User:
    def __init__(self, user_json: dict):
        self.raw = user_json
        self.id: int = user_json['id']
        self.is_bot: bool = user_json['is_bot']
        if 'last_name' in user_json:
            self.name = f"{user_json['first_name']} {user_json['last_name']}"
        else:
            self.name = user_json['first_name']
        if 'username' in user_json:
            self.username: str = user_json['username']
            self.link = 't.me/' + self.username
        else:
            self.username = ''
            self.link = ''


class Bot(User):
    def __init__(self, config: dict):
        self.config = config
        self.token: str = self.config['token']
        self.base_url = 'https://api.telegram.org/bot' + self.token + '/'
        if 'proxy' in self.config and self.config['proxy']['enable']:
            self.proxy_kw = {'proxies': {'https': self.config['proxy']['proxy_url']}}
        else:
            self.proxy_kw = {}
        get_me_resp: dict = requests.get(self.base_url + 'getMe', **self.proxy_kw).json()
        if not get_me_resp['ok']:
            raise APIError('Bot initialization failed. ' + get_me_resp['description'])
        super().__init__(get_me_resp['result'])
        self.can_join_groups: bool = get_me_resp['result']['can_join_groups']
        self.can_read_all_group_messages: bool = get_me_resp['result']['can_read_all_group_messages']
        self.supports_inline_queries: bool = get_me_resp['result']['supports_inline_queries']

        self.msg_tasks = []
        self.query_tasks = []
        self.member_status_tasks = []

    def api(self, action: str, data: dict):
        resp = requests.post(self.base_url + action, json=data, **self.proxy_kw).json()
        if not resp['ok']:
            raise APIError(f'API request "{action}" failed. {resp["description"]}')
        return resp['result']

    def get_updates(self, offset: int = 0, timeout: int = 60) -> list:
        update_data = {'offset': offset, 'timeout': timeout, 'allowed_updates': [
            # Accept all updates, but only part of them are available in catbot
            'message',  # Available
            'edited_message',
            'channel_post',
            'edited_channel_post',
            'inline_query',
            'chosen_inline_result',
            'callback_query',  # Available
            'shipping_query',
            'pre_checkout_query',
            'poll',
            'poll_answer',
            'my_chat_member',
            'chat_member'  # Available
        ]}
        updates = self.api('getUpdates', update_data)
        print(updates)
        return updates

    def add_msg_task(self, criteria, action, **action_kw):
        """
        Add tasks for the bot to process. For message updates only. Use add_query_task for callback query updates.
        :param criteria: A function that leads the flow of the program into the "action" function. It should take a
            Message-like object as the only argument and return a bool. When it returns True, "action" will be
            executed. An example is to return True if the message starts with "/start", which is the standard
            beginning of private chats with users.
        :param action: A function to be executed when criteria returns True. Typically it is the response to users'
            actions. It should take a Message-like object as the only positional argument and accept keyword
            arguments. Arguments in action_kw will be passed to it.
        :param action_kw: Keyword arguments that will be passed to action when it is called.
        :return:
        """
        self.msg_tasks.append((criteria, action, action_kw))

    def add_query_task(self, criteria, action, **action_kw):
        """
        Similar to add_msg_task, but adds criteria and actions for callback queries, typically clicks on in-message
        buttons (I would like to call them in-message instead of inline, which is used by Telegram).
        """
        self.query_tasks.append((criteria, action, action_kw))

    def add_member_status_task(self, criteria, action, **action_kw):
        """
        Similar to add_msg_task, but adds criteria and actions for chat member updates.
        """
        self.member_status_tasks.append((criteria, action, action_kw))

    def start(self):
        old_updates = self.get_updates(timeout=0)
        update_offset = old_updates[-1]['update_id'] + 1 if old_updates else 0
        while True:
            try:
                updates = self.get_updates(update_offset)
            except APIError as e:
                print(e.args[0])
                continue

            for item in updates:
                update_offset = item['update_id'] + 1
                if 'message' in item.keys():
                    msg = Message(item['message'])
                    for criteria, action, action_kw in self.msg_tasks:
                        if criteria(msg):
                            threading.Thread(target=action, args=(msg,), kwargs=action_kw).start()
                elif 'callback_query' in item.keys():
                    query = CallbackQuery(item['callback_query'])
                    if not hasattr(query, 'msg'):
                        continue
                    for criteria, action, action_kw in self.query_tasks:
                        if criteria(query):
                            threading.Thread(target=action, args=(query,), kwargs=action_kw).start()
                elif 'chat_member' in item.keys():
                    member_update = ChatMemberUpdate(item['chat_member'])
                    for criteria, action, action_kw in self.member_status_tasks:
                        if criteria(member_update):
                            threading.Thread(target=action, args=(member_update,), kwargs=action_kw).start()
                else:
                    continue

    def send_message(self, chat_id, **kw):
        """
        :param chat_id: Unique identifier for the target chat or username of the target channel
        :param kw: Keyword arguments defined in Telegram bot api. See https://core.telegram.org/bots/api#sendmessage<br>
            General keywords:<br>
            - parse_mode: Optional. Should be one of MarkdownV2 or HTML or Markdown.<br>
            - disable_web_page_preview: Optional. Should be True or False. Disables link previews for links in this
              message.<br>
            - disable_notification: Optional. Should be True or False. Sends the message silently. Users will
              receive a notification with no sound.<br>
            - reply_to_message_id: Optional. If the message is a reply, ID of the original message.<br>
            - allow_sending_without_reply: Optional. Pass True, if the message should be sent even if the specified
              replied-to message is not found<br>
            For plain text messages:<br>
            - text: Text of the message to be sent, 1-4096 characters after entities parsing.<br>
            - reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom
              reply keyboard, instructions to remove reply keyboard or to force a reply from the user. A common
              content of this param is an InlineKeyboard object.<br>
        :return:
        """
        if 'reply_markup' in kw.keys():
            kw['reply_markup'] = kw['reply_markup'].parse()
        msg_kw = {'chat_id': chat_id, **kw}

        return Message(self.api('sendMessage', msg_kw))

    def edit_message(self, chat_id, msg_id, **kw):
        if 'reply_markup' in kw.keys():
            kw['reply_markup'] = kw['reply_markup'].parse()
        msg_kw = {'chat_id': chat_id, 'message_id': msg_id, **kw}
        try:
            return Message(self.api('editMessageText', msg_kw))
        except APIError as e:
            if 'message is not modified' in e.args[0]:
                pass
            else:
                raise

    def forward_message(self, from_chat_id, to_chat_id, msg_id: int, disable_notification=False):
        """
        :param from_chat_id: Unique identifier for the chat where the original message was sent
        :param to_chat_id: Unique identifier for the target chat or username of the target channel
        :param msg_id: Message identifier in the chat specified in from_chat_id
        :param disable_notification: Optional. Sends the message silently. Users will receive a notification with
            no sound.
        :return: The forwarded message.
        """
        return Message(self.api('forwardMessage', {'from_chat_id': from_chat_id, 'chat_id': to_chat_id,
                                                   'message_id': msg_id,
                                                   'disable_notification': disable_notification}))

    def answer_callback_query(self, callback_query_id, **kwargs) -> bool:
        """
        :param callback_query_id: callback_query_id you receive in callback_query
        :param kwargs: Keyword arguments defined in Telegram bot api. You should always call this method after
            receiving a valid callback_query, even if you have nothing to send back to user.
            See https://core.telegram.org/bots/api#answercallbackquery
            - text: Optional. Text of the notification. If not specified, nothing will be shown to the user,
              0-200 characters.
            - show_alert: Optional. If true, an alert will be shown by the client instead of a notification at the
              top of the chat screen. Defaults to false.
            - cache_time: Optional. The maximum amount of time in seconds that the result of the callback query may
              be cached client-side. Telegram apps will support caching starting in version 3.14. Defaults to 0.
        :return:
        """
        return self.api('answerCallbackQuery', {'callback_query_id': callback_query_id, **kwargs})

    def get_chat(self, chat_id: int):
        try:
            chat = Chat(self.api('getChat', {'chat_id': chat_id}))
        except APIError as e:
            if e.args[0] == 'Bad Request: chat not found':
                raise ChatNotFoundError
            else:
                raise
        else:
            return chat

    def get_chat_member(self, chat_id: int, user_id: int):
        """
        Typically, use this method to build a ChatMember object.
        :param chat_id: ID of the chat that the ChatMember object will belong to.
        :param user_id: ID of the target user.
        :return: A ChatMember object, including info about permissions granted to the user in a specific chat.
        """
        try:
            chat_member = ChatMember(self.api('getChatMember', {'chat_id': chat_id, 'user_id': user_id}), chat_id)
        except APIError as e:
            if 'Bad Request: user not found' in e.args[0]:
                raise UserNotFoundError
            else:
                raise
        else:
            return chat_member

    def restrict_chat_member(self, chat_id: int, user_id: int, until: int = 5, **permissions) -> bool:
        """
        :param chat_id: Unique identifier for the target chat or username of the target supergroup
        :param user_id: Unique identifier of the target user
        :param until: Optional. Time when restrictions will be lifted for the user, unix time. If user is restricted
            for more than 366 days or less than 30 seconds from the current time, they are considered to be
            restricted forever. Default: Forever
        :param permissions: Chat permissions defined in Telegram bot api. Leave blank to restrict all actions
            except reading. See https://core.telegram.org/bots/api#chatpermissions
            - can_send_messages: Optional. True, if the user is allowed to send text messages, contacts, locations
              and venues
            - can_send_media_messages: Optional. True, if the user is allowed to send audios, documents, photos,
              videos, video notes and voice notes, implies can_send_messages
            - can_send_polls: Optional. True, if the user is allowed to send polls, implies can_send_messages
            - can_send_other_messages: Optional. True, if the user is allowed to send animations, games, stickers
              and use inline bots, implies can_send_media_messages
            - can_add_web_page_previews: Optional. True, if the user is allowed to add web page previews to their
              messages, implies can_send_media_messages
            - can_change_info: Optional. True, if the user is allowed to change the chat title, photo and other
              settings. Ignored in public supergroups
            - can_invite_users: Optional. True, if the user is allowed to invite new users to the chat
            - can_pin_messages: Optional. True, if the user is allowed to pin messages. Ignored in public supergroups
        :return: Return True on success, otherwise raise exception.
        """
        try:
            result = self.api('restrictChatMember', {'chat_id': chat_id, 'user_id': user_id, 'until_date': until,
                                                     'permissions': permissions})
        except APIError as e:
            if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]:
                raise InsufficientRightError
            elif 'Bad Request: user not found' in e.args[0]:
                raise UserNotFoundError
            elif 'Bad Request: user is an administrator' in e.args[0] or \
                    'Bad Request: can\'t remove chat owner' in e.args[0] or \
                    'Bad Request: not enough rights' in e.args[0]:
                raise RestrictAdminError
            else:
                raise
        else:
            return result

    def silence_chat_member(self, chat_id: int, user_id: int, until: int = 5) -> bool:
        """
        Remove the can_send_messages permission from the specified user.
        :param chat_id: Unique identifier for the target chat or username of the target supergroup
        :param user_id: Unique identifier of the target user
        :param until: Optional. Time when restrictions will be lifted for the user, unix time. If user is restricted
            for more than 366 days or less than 30 seconds from the current time, they are considered to be
            restricted forever. Default: Forever
        :return: Return True on success, otherwise raise exception.
        """
        try:
            result = self.api('restrictChatMember', {'chat_id': chat_id, 'user_id': user_id, 'until_date': until,
                                                     'permissions': {'can_send_messages': False}})
        except APIError as e:
            if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]:
                raise InsufficientRightError
            elif 'Bad Request: user not found' in e.args[0]:
                raise UserNotFoundError
            elif 'Bad Request: user is an administrator' in e.args[0] or \
                    'Bad Request: can\'t remove chat owner' in e.args[0] or \
                    'Bad Request: not enough rights' in e.args[0]:
                raise RestrictAdminError
            else:
                raise
        else:
            return result

    def lift_restrictions(self, chat_id: int, user_id: int) -> bool:
        """
        Lift all restrictions on the specified user.
        :param chat_id: Unique identifier for the target chat or username of the target supergroup
        :param user_id: Unique identifier of the target user
        :return: Return True on success, otherwise raise exception.
        """
        try:
            result = self.api('restrictChatMember', {'chat_id': chat_id, 'user_id': user_id,
                                                     'permissions': {'can_send_messages': True,
                                                                     'can_send_media_messages': True,
                                                                     'can_send_polls': True,
                                                                     'can_send_other_messages': True,
                                                                     'can_add_web_page_previews': True,
                                                                     'can_change_info': True,
                                                                     'can_invite_users': True,
                                                                     'can_pin_messages': True}})
        except APIError as e:
            if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]:
                raise InsufficientRightError
            elif 'Bad Request: user not found' in e.args[0]:
                raise UserNotFoundError
            elif 'Bad Request: user is an administrator' in e.args[0] or \
                    'Bad Request: can\'t remove chat owner' in e.args[0] or \
                    'Bad Request: not enough rights' in e.args[0]:
                raise RestrictAdminError
            else:
                raise
        else:
            return result

    def kick_chat_member(self, chat_id: int, user_id: int, until: int = 0, no_ban: bool = False) -> bool:
        """
        Kick a chat member out. See https://core.telegram.org/bots/api#kickchatmember
        :param chat_id: Unique identifier for the target chat or username of the target supergroup
        :param user_id: Unique identifier of the target user
        :param until: Optional, default 0 (infinite ban). Date when the user will be unbanned, unix time. If user is
            banned for more than 366 days or less than 30 seconds from the current time they are considered to be
            banned forever
        :param no_ban: Kick out and then allow the user to join or send messages (from channel or somewhere else)
        :return: Return True on success, otherwise raise exception.
        """
        try:
            if no_ban:  # That's the way the Telegram API acts
                result = self.api('unbanChatMember', {'chat_id': chat_id, 'user_id': user_id})
            else:
                result = self.api('kickChatMember', {'chat_id': chat_id, 'user_id': user_id, 'until_date': until})
        except APIError as e:
            if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]:
                raise InsufficientRightError
            elif 'Bad Request: user not found' in e.args[0]:
                raise UserNotFoundError
            elif 'Bad Request: user is an administrator' in e.args[0] or \
                    'Bad Request: can\'t remove chat owner' in e.args[0] or \
                    'Bad Request: not enough rights' in e.args[0]:
                raise RestrictAdminError
            else:
                raise
        else:
            return result

    def unban_chat_member(self, chat_id: int, user_id: int) -> bool:
        """
        Unban a banned user. See https://core.telegram.org/bots/api#unbanchatmember
        :param chat_id: Unique identifier for the target chat or username of the target supergroup
        :param user_id: Unique identifier of the target user
        """
        try:
            result = self.api('unbanChatMember', {'chat_id': chat_id, 'user_id': user_id, 'only_if_banned': True})
        except APIError as e:
            if 'Bad Request: not enough rights to restrict/unrestrict chat member' in e.args[0]:
                raise InsufficientRightError
            elif 'Bad Request: user not found' in e.args[0]:
                raise UserNotFoundError
            elif 'Bad Request: user is an administrator' in e.args[0] or \
                    'Bad Request: can\'t remove chat owner' in e.args[0] or \
                    'Bad Request: not enough rights' in e.args[0]:
                raise RestrictAdminError
            else:
                raise
        else:
            return result

    def delete_message(self, chat_id: int, msg_id: int) -> bool:
        try:
            result = self.api('deleteMessage', {'chat_id': chat_id, 'message_id': msg_id})
        except APIError as e:
            if 'Bad Request: message to delete not found' in e.args[0] or \
                    'Bad Request: message can\'t be deleted' in e.args[0]:
                raise DeleteMessageError
            else:
                raise
        else:
            return result


class ChatMember(User):
    def __init__(self, member_json: dict, chat_id: int):
        """
        Typically, build a ChatMember object from the Bot.get_chat_member() method, which automatically gets the
        corresponding Chat object.
        :param member_json: Raw response from "getChatMember" API
        :param chat_id: ID of the chat which this ChatMember belongs to.
        """
        super().__init__(member_json['user'])
        self.raw = f'{{"chat_member": {member_json}, "chat_id": {chat_id}}}'
        self.chat_id: int = chat_id

        # Can be “creator”, “administrator”, “member”, “restricted”, “left” or “kicked”
        self.status: str = member_json['status']

        if self.status == 'administrator' or self.status == 'creator':
            self.is_anonymous: bool = member_json['is_anonymous']
        if self.status == 'administrator':
            self.can_be_edited: bool = member_json['can_be_edited']
            self.can_delete_messages: bool = member_json['can_delete_messages']
            self.can_promote_members: bool = member_json['can_promote_members']
        if self.status == 'administrator' or self.status == 'restricted':
            self.can_change_info: bool = member_json['can_change_info']
            self.can_invite_users: bool = member_json['can_invite_users']
            self.can_pin_messages: bool = member_json['can_pin_messages']
        if self.status == 'restricted':
            self.until_date: int = member_json['until_date']
            self.is_member: bool = member_json['is_member']
            self.can_send_messages: bool = member_json['can_send_messages']
            self.can_send_media_messages: bool = member_json['can_send_media_messages']
            self.can_send_polls: bool = member_json['can_send_polls']
            self.can_send_other_messages: bool = member_json['can_send_other_messages']  # sticker, gif and inline bot
            self.can_add_web_page_previews: bool = member_json['can_add_web_page_previews']  # "embed links" in client
        if self.status == 'kicked':
            self.until_date: int = member_json['until_date']

        if 'custom_title' in member_json.keys():
            self.custom_title: str = member_json['custom_title']

    def __str__(self):
        return self.raw


class Message:
    def __init__(self, msg_json: dict):
        self.raw = msg_json
        self.chat = Chat(msg_json['chat'])
        self.id: int = msg_json['message_id']

        # Empty for message in channels
        if 'from' in msg_json.keys():
            self.from_ = User(msg_json['from'])

        if str(self.chat.id).startswith('-100'):
            self.link = f't.me/c/{str(self.chat.id).replace("-100", "")}/{self.id}'
        else:
            self.link = ''

        # The channel itself for channel messages. The supergroup itself for messages from anonymous group
        # administrators. The linked channel for messages automatically forwarded to the discussion group
        if 'sender_chat' in msg_json.keys():
            self.sender_chat = Chat(msg_json['sender_chat'])

        self.date: int = msg_json['date']

        # Signature of the post author for messages in channels, or the custom title of an anonymous group
        # administrator
        if 'author_signature' in msg_json.keys():
            self.author_signature: str = msg_json['author_signature']

        if 'forward_from' in msg_json.keys():
            # forwarded from users who allowed a link to their account in forwarded message
            self.forward_from = User(msg_json['forward_from'])
            self.forward = True
        elif 'forward_sender_name' in msg_json.keys():
            # forwarded from users who disallowed a link to their account in forwarded message
            self.forward_sender_name: str = msg_json['forward_sender_name']
            self.forward = True
        elif 'forward_from_message_id' in msg_json.keys():
            # forwarded from channels
            self.forward_from_chat = Chat(msg_json['forward_from_chat'])
            self.forward_from_message_id: int = msg_json['forward_from_message_id']
            if 'forward_signature' in msg_json.keys():
                self.forward_signature: str = msg_json['forward_signature']
            else:
                self.forward_signature = ''
            self.forward = True
        elif 'forward_from_chat' in msg_json.keys():
            # forwarded from anonymous admins
            self.forward_from_chat = Chat(msg_json['forward_from_chat'])
            self.forward = True
        else:
            self.forward = False
        if self.forward:
            self.forward_date: int = msg_json['forward_date']

        if 'reply_to_message' in msg_json.keys():
            self.reply_to_message = Message(msg_json['reply_to_message'])
            self.reply = True
        else:
            self.reply = False

        if 'edit_date' in msg_json.keys():
            self.edit_date: int = msg_json['edit_date']
            self.edit = True
        else:
            self.edit = False

        if 'text' in msg_json.keys():
            self.text: str = msg_json['text']
        elif 'caption' in msg_json.keys():
            self.text: str = msg_json['caption']
        else:
            self.text: str = ''

        if 'new_chat_members' in msg_json.keys():
            self.new_chat_members: List[User] = []
            for user_json in msg_json['new_chat_members']:
                self.new_chat_members.append(User(user_json))
        if 'left_chat_member' in msg_json.keys():
            self.left_chat_member: User = User(msg_json['left_chat_member'])

        self.mentions = []
        self.hashtags = []
        self.cashtags = []
        self.commands = []
        self.links = []
        self.bolds = []
        self.italics = []
        self.underlines = []
        self.strikethroughs = []
        self.codes = []
        self.text_links = []
        self.text_mention = []
        self.html_formatted_text = self.text
        if 'entities' in msg_json.keys() or 'caption_entities' in msg_json.keys():
            entity_type = 'entities' if 'entities' in msg_json.keys() else 'caption_entities'
            entity_to_be_formatted = []
            for item in msg_json[entity_type]:
                offset = item['offset']
                length = item['length']
                if item['type'] == 'mention':
                    self.mentions.append(self.text[offset:offset + length])
                elif item['type'] == 'hashtag':
                    self.hashtags.append(self.text[offset:offset + length])
                elif item['type'] == 'cashtag':
                    self.cashtags.append(self.text[offset:offset + length])
                elif item['type'] == 'bot_command':
                    self.commands.append(self.text[offset:offset + length])
                elif item['type'] == 'url':
                    self.links.append(self.text[offset:offset + length])
                elif item['type'] == 'bold':
                    self.bolds.append(self.text[offset:offset + length])
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'italic':
                    self.italics.append(self.text[offset:offset + length])
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'underline':
                    self.underlines.append(self.text[offset:offset + length])
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'strikethrough':
                    self.strikethroughs.append(self.text[offset:offset + length])
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'code':
                    self.codes.append(self.text[offset:offset + length])
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'text_link':
                    self.text_links.append((self.text[offset:offset + length], item['url']))
                    entity_to_be_formatted.append(item)
                elif item['type'] == 'text_mention':
                    self.text_mention.append((self.text[offset:offset + length], User(item['user'])))
                    entity_to_be_formatted.append(item)

            entity_to_be_formatted = sorted(entity_to_be_formatted, key=lambda x: x['offset'], reverse=True)
            for item in entity_to_be_formatted:
                offset = item['offset']
                length = item['length']
                if item['type'] == 'bold':
                    self.html_formatted_text = self.text[:offset] + f'<b>{self.text[offset:offset + length]}</b>' + \
                        self.html_formatted_text[offset + length:]
                elif item['type'] == 'italic':
                    self.html_formatted_text = self.text[:offset] + f'<i>{self.text[offset:offset + length]}</i>' + \
                        self.html_formatted_text[offset + length:]
                elif item['type'] == 'underline':
                    self.html_formatted_text = self.text[:offset] + f'<u>{self.text[offset:offset + length]}</u>' + \
                        self.html_formatted_text[offset + length:]
                elif item['type'] == 'strikethrough':
                    self.html_formatted_text = self.text[:offset] + f'<s>{self.text[offset:offset + length]}</s>' + \
                        self.html_formatted_text[offset + length:]
                elif item['type'] == 'code':
                    self.html_formatted_text = self.text[:offset] + \
                        f'<code>{self.text[offset:offset + length]}</code>' + \
                        self.html_formatted_text[offset + length:]
                elif item['type'] == 'text_link':
                    self.html_formatted_text = self.text[:offset] + f"<a href=\"{item['url']}\">" \
                        f"{self.text[offset:offset + length]}</a>" + \
                        self.html_formatted_text[offset + length:]
                elif item['type'] == 'text_mention':
                    self.html_formatted_text = self.text[:offset] + f"<a href=\"tg://user?id={item['user']['id']}\">" \
                        f"{self.text[offset:offset + length]}</a>" + \
                        self.html_formatted_text[offset + length:]

        if 'dice' in msg_json.keys():
            self.dice = True
            self.dice_emoji = msg_json['dice']['emoji']
            self.dice_value = msg_json['dice']['value']
        else:
            self.dice = False

        if 'reply_markup' in msg_json.keys():
            self.reply_markup: InlineKeyboard = InlineKeyboard.from_json(msg_json['reply_markup'])

    def __str__(self):
        return str(self.raw)


class InlineKeyboardButton:
    def __init__(self, text: str, **kwargs):
        """
        :param text: Text shown on the button.
        :param kwargs: Other optional params defined in Telegram bot api.
            See https://core.telegram.org/bots/api#inlinekeyboardbutton
            - url: Optional. HTTP or tg:// url to be opened when button is pressed
            - callback_data: Optional. Data to be sent in a callback query to the bot when button is pressed,
              1-64 bytes
        """
        self.text = text
        if len(kwargs) == 0:
            raise APIError('Inline keyboard button must have either url or callback_data.')
        if 'url' in kwargs.keys():
            self.url = kwargs['url']
        if 'callback_data' in kwargs.keys():
            self.callback_data = kwargs['callback_data']

    @classmethod
    def from_json(cls, button_json: dict):
        return cls(**button_json)

    def parse(self) -> dict:
        """
        :return: self.__dict__ for follow-up usage like json serialization.
        """
        return self.__dict__


class InlineKeyboard:
    def __init__(self, key_list: List[List[InlineKeyboardButton]]):
        """
        :param key_list: Use InlineKeyboardButton to structure the buttons you want and pass it into this
            initializer. Each sublist represents a row. Buttons in the same sublist will be placed in the same row.
        """
        self.key_list = key_list

    @classmethod
    def from_json(cls, markup_json: dict):
        markup_list: List[List[dict]] = markup_json['inline_keyboard']
        key_list: List[List[InlineKeyboardButton]] = []
        # Walk the button matrix row by row
        for i in range(len(markup_list)):
            key_list.append([])
            for j in range(len(markup_list[i])):
                key_list[i].append(InlineKeyboardButton.from_json(markup_list[i][j]))

        return cls(key_list)

    def parse(self) -> Dict[str, List[List[Dict]]]:
        key_list: List[List[dict]] = []
        for i in range(len(self.key_list)):
            key_list.append([])
            for j in range(len(self.key_list[i])):
                key_list[i].append(self.key_list[i][j].parse())

        return {'inline_keyboard': key_list}


class CallbackQuery:
    def __init__(self, query_json: dict):
        self.raw = query_json
        self.id: str = query_json['id']
        self.from_ = User(query_json['from'])
        if 'message' not in query_json.keys():
            self.msg = ''
        else:
            self.msg = Message(query_json['message'])
        self.chat_instance: str = query_json['chat_instance']
        if 'data' in query_json.keys():
            self.data: str = query_json['data']
        else:
            self.data = ''
        if 'inline_message_id' in query_json.keys():
            self.inline_message_id: str = query_json['inline_message_id']
        else:
            self.inline_message_id = ''

    def __str__(self):
        return str(self.raw)


class ChatMemberUpdate:
    def __init__(self, update_json: dict):
        self.raw = update_json
        self.chat = Chat(update_json['chat'])
        self.from_ = User(update_json['from'])
        self.date: int = update_json['date']
        self.old_chat_member = ChatMember(update_json['old_chat_member'], self.chat.id)
        self.new_chat_member = ChatMember(update_json['new_chat_member'], self.chat.id)

    def __str__(self):
        return str(self.raw)


class Chat:
    def __init__(self, chat_json: dict):
        self.raw = chat_json
        self.id: int = chat_json['id']
        self.type: str = chat_json['type']
        if self.type == 'supergroup' or self.type == 'group' or self.type == 'channel':
            self.name: str = chat_json['title']
        else:
            if 'last_name' in chat_json.keys():
                self.name = f'{chat_json["first_name"]} {chat_json["last_name"]}'
            else:
                self.name = chat_json['first_name']
        if 'username' in chat_json.keys():
            self.username: str = chat_json['username']
            self.link = 't.me/' + self.username
        else:
            self.username = ''
            self.link = ''

        # Returned by get_chat
        if 'bio' in chat_json.keys():  # If the chat is a private chat
            self.bio: str = chat_json['bio']
        if 'description' in chat_json.keys():  # If the chat is a group, supergroup or channel
            self.description: str = chat_json['description']
        if 'pinned_message' in chat_json.keys():
            self.pinned_message = Message(chat_json['pinned_message'])
        if 'slow_mode_delay' in chat_json.keys():  # If the chat is a supergroup
            self.slow_mode_delay: int = chat_json['slow_mode_delay']
        if 'linked_chat_id' in chat_json.keys():
            # If the supergroup or channel has a linked channel or supergroup, respectively
            self.linked_chat_id: int = chat_json['linked_chat_id']

    def __str__(self):
        return str(self.raw)


class APIError(Exception):
    pass


class UserNotFoundError(APIError):
    pass


class ChatNotFoundError(APIError):
    pass


class InsufficientRightError(APIError):
    pass


class RestrictAdminError(APIError):
    pass


class DeleteMessageError(APIError):
    pass
import io
import json

import aiohttp
import discord
import yaml
from discord.ext import commands
from z3rsramr import parse_sram

import pyz3r

from alttprbot.alttprgen.mystery import (generate_random_game,
                                         generate_test_game)
from alttprbot.alttprgen.preset import get_preset, fetch_preset, generate_preset
from alttprbot.alttprgen.spoilers import generate_spoiler_game
from alttprbot.database import audit, config
from alttprbot.exceptions import SahasrahBotException
from alttprbot_discord.util.alttpr_discord import alttpr

from ..util import checks

# from config import Config as c


class AlttprGen(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    @commands.is_owner()
    async def goalstring(self, ctx, hash_id):
        seed = await alttpr(hash_id=hash_id)
        await ctx.send(
            f"goal string: `{seed.generated_goal}`\n"
            f"file select code: {seed.build_file_select_code(emojis=self.bot.emojis)}"
        )

    @commands.group(
        brief='Generate a race preset.',
        help='Generate a race preset. Find a list of presets at https://l.synack.live/presets',
        invoke_without_command=True,
        aliases=['racepreset']
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def preset(self, ctx, preset, hints=False):
        seed, preset_dict = await get_preset(preset, hints=hints, spoilers="off")
        if not seed:
            raise SahasrahBotException(
                'Could not generate game. Maybe preset does not exist?')
        embed = await seed.embed(emojis=self.bot.emojis)
        await ctx.send(embed=embed)

    @preset.command(
        name='custom',
        brief='Generate a custom preset.',
        help='Generate a custom preset. This file should be attached to the message.'
    )
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def preset_custom(self, ctx, tournament: bool = True):
        if ctx.message.attachments:
            content = await ctx.message.attachments[0].read()
            preset_dict = yaml.safe_load(content)
            seed = await generate_preset(preset_dict, preset='custom', spoilers="off", tournament=tournament)
        else:
            raise SahasrahBotException("You must supply a valid yaml file.")

        embed = await seed.embed(emojis=self.bot.emojis)
        await ctx.send(embed=embed)

    @commands.group(
        brief='Generate a preset without the race flag enabled.',
        help='Generate a preset without the race flag enabled. Find a list of presets at https://l.synack.live/presets',
        invoke_without_command=True,
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def nonracepreset(self, ctx, preset, hints=False):
        seed, preset_dict = await get_preset(preset, hints=hints, spoilers="on", tournament=False)
        if not seed:
            raise SahasrahBotException(
                'Could not generate game. Maybe preset does not exist?')
        embed = await seed.embed(emojis=self.bot.emojis)
        await ctx.send(embed=embed)

    @nonracepreset.command(
        name='custom',
        brief='Generate a custom preset.',
        help='Generate a custom preset. This file should be attached to the message.'
    )
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def nonracepreset_custom(self, ctx, tournament: bool = True):
        if ctx.message.attachments:
            content = await ctx.message.attachments[0].read()
            preset_dict = yaml.safe_load(content)
            seed = await generate_preset(preset_dict, preset='custom', spoilers="on", tournament=tournament)
        else:
            raise SahasrahBotException("You must supply a valid yaml file.")

        embed = await seed.embed(emojis=self.bot.emojis)
        await ctx.send(embed=embed)

    @commands.command(
        brief='Generate a spoiler game.',
        help='Generate a spoiler game. Find a list of presets at https://l.synack.live/presets'
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def spoiler(self, ctx, preset):
        seed, preset_dict, spoiler_log_url = await generate_spoiler_game(preset)

        if not seed:
            raise SahasrahBotException(
                'Could not generate game. Maybe preset does not exist?')
        embed = await seed.embed(emojis=self.bot.emojis)
        await ctx.send(f'Spoiler log <{spoiler_log_url}>', embed=embed)

    @commands.group(
        brief='Generate a game with randomized settings.',
        help='Generate a game with randomized settings. Find a list of weights at https://l.synack.live/weights',
        invoke_without_command=True,
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    async def random(self, ctx, weightset='weighted', tournament: bool = True):
        await randomgame(ctx=ctx, weightset=weightset, tournament=tournament, spoilers="off", festive=False)

    @random.command(
        name='custom',
        brief='Generate a mystery game with custom weights.',
        help='Generate a mystery game with custom weights. This file should be attached to the message.'
    )
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def random_custom(self, ctx, tournament: bool = True):
        if ctx.message.attachments:
            content = await ctx.message.attachments[0].read()
            weights = yaml.safe_load(content)
            await randomgame(ctx=ctx, weights=weights, weightset='custom', tournament=tournament,
                             spoilers="off", festive=False)
        else:
            raise SahasrahBotException("You must supply a valid yaml file.")

    @commands.group(
        brief='Generate a mystery game.',
        help='Generate a mystery game. Find a list of weights at https://l.synack.live/weights',
        invoke_without_command=True,
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    async def mystery(self, ctx, weightset='weighted'):
        await randomgame(ctx=ctx, weightset=weightset, tournament=True, spoilers="mystery", festive=False)

    @mystery.command(
        name='custom',
        brief='Generate a mystery game with custom weights.',
        help='Generate a mystery game with custom weights. This file should be attached to the message.'
    )
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def mystery_custom(self, ctx):
        if ctx.message.attachments:
            content = await ctx.message.attachments[0].read()
            weights = yaml.safe_load(content)
            await randomgame(ctx=ctx, weights=weights, weightset='custom', tournament=True,
                             spoilers="mystery", festive=False)
        else:
            raise SahasrahBotException("You must supply a valid yaml file.")

    @commands.command(
        brief='Generate a mystery game.',
        help='Generate a mystery game. Find a list of weights at https://l.synack.live/weights'
    )
    @commands.is_owner()
    async def mysterytest(self, ctx, weightset='bot_testing'):
        resp = await generate_test_game(weightset=weightset)
        await ctx.send(file=discord.File(io.StringIO(json.dumps(resp, indent=4)), filename=f"{weightset}.txt"))

    @commands.command(
        brief='Verify a game was generated by SahasrahBot.',
        help='Verify a game was generated by SahasrahBot.\nThis can be useful for checking that customizer games are not a plando or something like that if you accept viewer games as a streamer.'
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def verifygame(self, ctx, hash_id):
        result = await audit.get_generated_game(hash_id)
        if result:
            await ctx.send((
                f"{hash_id} was generated by SahasrahBot.\n\n"
                f"**Randomizer:** {result["randomizer"]}\n"
                f"**Game Type:** {result["gentype"]}\n"
                f"**Game Option:** {result["genoption"]}\n\n"
                f"**Permalink:** <{result["permalink"]}>"
            ))
        else:
            await ctx.send("That game was not generated by SahasrahBot.")

    @commands.command(
        brief='Get changes in retrieved game vs. baseline settings.'
    )
    @commands.is_owner()
    async def mysteryspoiler(self, ctx, hash_id):
        result = await audit.get_generated_game(hash_id)
        if not result:
            raise SahasrahBotException('That game was not generated by this bot.')
        if not result['randomizer'] == 'alttpr':
            raise SahasrahBotException('That is not an alttpr game.')
        if not result['gentype'] == 'mystery':
            raise SahasrahBotException('That is not a mystery game.')

        settings = json.loads(result['settings'])

        await ctx.send(file=discord.File(io.StringIO(json.dumps(settings, indent=4)), filename=f"{hash_id}.txt"))

    @commands.group(
        brief='Generate a festive game with randomized settings.',
        help='Generate a festive game with randomized settings. Find a list of weights at https://l.synack.live/weights'
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    @checks.restrict_command_globally('FestiveMode')
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    async def festiverandom(self, ctx, weightset='weighted', tournament: bool = True):
        await randomgame(ctx=ctx, weightset=weightset, tournament=tournament, spoilers="off", festive=True)

    @commands.group(
        brief='Generate a festive mystery game.',
        help='Generate a festive mystery game. Find a list of weights at https://l.synack.live/weights'
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    @checks.restrict_command_globally('FestiveMode')
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    async def festivemystery(self, ctx, weightset='weighted'):
        await randomgame(ctx=ctx, weightset=weightset, tournament=True, spoilers="mystery", festive=True)

    @commands.command(hidden=True, aliases=['festives'])
    async def festive(self, ctx):
        if await config.get(0, 'FestiveMode') == "true":
            embed = discord.Embed(
                title='Festive Randomizer Information',
                description='Latest details of any upcoming festive randomizers.',
                color=discord.Color.green()
            )
            embed.add_field(name="Christmas Festive 2019", value="https://alttpr.com/special")
        else:
            embed = discord.Embed(
                title='Festive Randomizer Information',
                description='Latest details of any upcoming festive randomizers.',
                color=discord.Color.red()
            )
            embed.set_image(
                url='https://cdn.discordapp.com/attachments/307860211333595146/654123045375442954/unknown.png')

        await ctx.send(embed=embed)

    @commands.command()
    async def alttprstats(self, ctx, raw: bool = False):
        if ctx.message.attachments:
            sram = await ctx.message.attachments[0].read()
            parsed = parse_sram(sram)
            if raw:
                await ctx.send(
                    file=discord.File(
                        io.StringIO(json.dumps(parsed, indent=4)),
                        filename=f"stats_{parsed["meta"].get("filename", "alttpr").strip()}.txt"
                    )
                )
            else:
                embed = discord.Embed(
                    title=f"ALTTPR Stats for \"{parsed["meta"].get("filename", "").strip()}\"",
                    description=f"Collection Rate {parsed["stats"].get("collection rate")}",
                    color=discord.Color.blue()
                )
                embed.add_field(
                    name="Time",
                    value=(
                        f"Total Time: {parsed["stats"].get("total time", None)}\n"
                        f"Lag Time: {parsed["stats"].get("lag time", None)}\n"
                        f"Menu Time: {parsed["stats"].get("menu time", None)}\n\n"
                        f"First Sword: {parsed["stats"].get("first sword", None)}\n"
                        f"Flute Found: {parsed["stats"].get("flute found", None)}\n"
                        f"Mirror Found: {parsed["stats"].get("mirror found", None)}\n"
                        f"Boots Found: {parsed["stats"].get("boots found", None)}\n"
                    ),
                    inline=False
                )
                embed.add_field(
                    name="Important Stats",
                    value=(
                        f"Bonks: {parsed["stats"].get("bonks", None)}\n"
                        f"Deaths: {parsed["stats"].get("deaths", None)}\n"
                        f"Revivals: {parsed["stats"].get("faerie revivals", None)}\n"
                        f"Overworld Mirrors: {parsed["stats"].get("overworld mirrors", None)}\n"
                        f"Rupees Spent: {parsed["stats"].get("rupees spent", None)}\n"
                        f"Save and Quits: {parsed["stats"].get("save and quits", None)}\n"
                        f"Screen Transitions: {parsed["stats"].get("screen transitions", None)}\n"
                        f"Times Fluted: {parsed["stats"].get("times fluted", None)}\n"
                        f"Underworld Mirrors: {parsed["stats"].get("underworld mirrors", None)}\n"
                    )
                )
                embed.add_field(
                    name="Misc Stats",
                    value=(
                        f"Swordless Bosses: {parsed["stats"].get("swordless bosses", None)}\n"
                        f"Fighter Sword Bosses: {parsed["stats"].get("fighter sword bosses", None)}\n"
                        f"Master Sword Bosses: {parsed["stats"].get("master sword bosses", None)}\n"
                        f"Tempered Sword Bosses: {parsed["stats"].get("tempered sword bosses", None)}\n"
                        f"Golden Sword Bosses: {parsed["stats"].get("golden sword bosses", None)}\n\n"
                        f"Heart Containers: {parsed["stats"].get("heart containers", None)}\n"
                        f"Heart Pieces: {parsed["stats"].get("heart pieces", None)}\n"
                        f"Mail Upgrade: {parsed["stats"].get("mails", None)}\n"
                        f"Bottles: {parsed["equipment"].get("bottles", None)}\n"
                        f"Silver Arrows: {parsed["equipment"].get("silver arrows", None)}\n"
                    )
                )
                if parsed.get('hash id', 'none') != 'none':
                    seed = await alttpr(hash_id=parsed.get('hash id', 'none'))
                    embed.add_field(name='File Select Code', value=seed.build_file_select_code(
                        emojis=ctx.bot.emojis
                    ), inline=False)
                    embed.add_field(name='Permalink', value=seed.url, inline=False)

                await ctx.send(embed=embed)
        else:
            raise SahasrahBotException("You must attach an SRAM file.")

    @commands.command(
        brief='Make a SahasrahBot preset file from a customizer save.',
        help=(
            'Take a customizer settings save and create a SahasrahBot preset file from it.\n'
            'This can then be fed into SahasrahBot using the "$preset custom" command.\n\n'
        )
    )
    async def convertcustomizer(self, ctx):
        if ctx.message.attachments:
            content = await ctx.message.attachments[0].read()
            customizer_save = json.loads(content)
            settings = pyz3r.customizer.convert2settings(customizer_save)
            preset_dict = {
                'customizer': True,
                'goal_name': "REPLACE WITH SRL GOAL STRING",
                'randomizer': 'alttpr',
                'settings': settings
            }
            await ctx.send(
                file=discord.File(
                    io.StringIO(yaml.dump(preset_dict)),
                    filename="output.yaml"
                )
            )
        else:
            raise SahasrahBotException("You must supply a valid customizer save file.")


async def randomgame(ctx, weightset=None, weights=None, tournament=True, spoilers="off", festive=False):
    seed = await generate_random_game(
        weightset=weightset,
        weights=weights,
        tournament=tournament,
        spoilers=spoilers,
        festive=festive
    )

    embed = await seed.embed(emojis=ctx.bot.emojis, name="Mystery Game")
    await ctx.send(embed=embed)


async def get_customizer_json(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            text = await resp.read()

    return json.loads(text)


def setup(bot):
    bot.add_cog(AlttprGen(bot))
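# --- Usage sketch (not part of the cog above). ---
# How a discord.py 1.x bot might load this cog; the extension path
# 'alttprbot_discord.cogs.alttprgen' is a guess for illustration, not taken
# from the source.
#
# from discord.ext import commands
# bot = commands.Bot(command_prefix='$')
# bot.load_extension('alttprbot_discord.cogs.alttprgen')  # calls setup(bot) above
# bot.run('YOUR_DISCORD_TOKEN')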
import io
import json

import aiohttp
import discord
import yaml
from discord.ext import commands
from z3rsramr import parse_sram

import pyz3r

from alttprbot.alttprgen.mystery import (generate_random_game,
                                         generate_test_game)
from alttprbot.alttprgen.preset import get_preset, fetch_preset, generate_preset
from alttprbot.alttprgen.spoilers import generate_spoiler_game
from alttprbot.database import audit, config
from alttprbot.exceptions import SahasrahBotException
from alttprbot_discord.util.alttpr_discord import alttpr

from ..util import checks

# from config import Config as c


class AlttprGen(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    @commands.is_owner()
    async def goalstring(self, ctx, hash_id):
        seed = await alttpr(hash_id=hash_id)
        await ctx.send(
            f"goal string: `{seed.generated_goal}`\n"
            f"file select code: {seed.build_file_select_code(emojis=self.bot.emojis)}"
        )

    @commands.group(
        brief='Generate a race preset.',
        help='Generate a race preset. Find a list of presets at https://l.synack.live/presets',
        invoke_without_command=True,
        aliases=['racepreset']
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def preset(self, ctx, preset, hints=False):
        seed, preset_dict = await get_preset(preset, hints=hints, spoilers="off")
        if not seed:
            raise SahasrahBotException(
                'Could not generate game. Maybe preset does not exist?')
        embed = await seed.embed(emojis=self.bot.emojis)
        await ctx.send(embed=embed)

    @preset.command(
        name='custom',
        brief='Generate a custom preset.',
        help='Generate a custom preset. This file should be attached to the message.'
    )
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def preset_custom(self, ctx, tournament: bool = True):
        if ctx.message.attachments:
            content = await ctx.message.attachments[0].read()
            preset_dict = yaml.safe_load(content)
            seed = await generate_preset(preset_dict, preset='custom', spoilers="off", tournament=tournament)
        else:
            raise SahasrahBotException("You must supply a valid yaml file.")

        embed = await seed.embed(emojis=self.bot.emojis)
        await ctx.send(embed=embed)

    @commands.group(
        brief='Generate a preset without the race flag enabled.',
        help='Generate a preset without the race flag enabled. Find a list of presets at https://l.synack.live/presets',
        invoke_without_command=True,
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def nonracepreset(self, ctx, preset, hints=False):
        seed, preset_dict = await get_preset(preset, hints=hints, spoilers="on", tournament=False)
        if not seed:
            raise SahasrahBotException(
                'Could not generate game. Maybe preset does not exist?')
        embed = await seed.embed(emojis=self.bot.emojis)
        await ctx.send(embed=embed)

    @nonracepreset.command(
        name='custom',
        brief='Generate a custom preset.',
        help='Generate a custom preset. This file should be attached to the message.'
    )
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def nonracepreset_custom(self, ctx, tournament: bool = True):
        if ctx.message.attachments:
            content = await ctx.message.attachments[0].read()
            preset_dict = yaml.safe_load(content)
            seed = await generate_preset(preset_dict, preset='custom', spoilers="on", tournament=tournament)
        else:
            raise SahasrahBotException("You must supply a valid yaml file.")

        embed = await seed.embed(emojis=self.bot.emojis)
        await ctx.send(embed=embed)

    @commands.command(
        brief='Generate a spoiler game.',
        help='Generate a spoiler game. Find a list of presets at https://l.synack.live/presets'
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def spoiler(self, ctx, preset):
        seed, preset_dict, spoiler_log_url = await generate_spoiler_game(preset)

        if not seed:
            raise SahasrahBotException(
                'Could not generate game. Maybe preset does not exist?')
        embed = await seed.embed(emojis=self.bot.emojis)
        await ctx.send(f'Spoiler log <{spoiler_log_url}>', embed=embed)

    @commands.group(
        brief='Generate a game with randomized settings.',
        help='Generate a game with randomized settings. Find a list of weights at https://l.synack.live/weights',
        invoke_without_command=True,
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    async def random(self, ctx, weightset='weighted', tournament: bool = True):
        await randomgame(ctx=ctx, weightset=weightset, tournament=tournament, spoilers="off", festive=False)

    @random.command(
        name='custom',
        brief='Generate a mystery game with custom weights.',
        help='Generate a mystery game with custom weights. This file should be attached to the message.'
    )
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def random_custom(self, ctx, tournament: bool = True):
        if ctx.message.attachments:
            content = await ctx.message.attachments[0].read()
            weights = yaml.safe_load(content)
            await randomgame(ctx=ctx, weights=weights, weightset='custom', tournament=tournament,
                             spoilers="off", festive=False)
        else:
            raise SahasrahBotException("You must supply a valid yaml file.")

    @commands.group(
        brief='Generate a mystery game.',
        help='Generate a mystery game. Find a list of weights at https://l.synack.live/weights',
        invoke_without_command=True,
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    async def mystery(self, ctx, weightset='weighted'):
        await randomgame(ctx=ctx, weightset=weightset, tournament=True, spoilers="mystery", festive=False)

    @mystery.command(
        name='custom',
        brief='Generate a mystery game with custom weights.',
        help='Generate a mystery game with custom weights. This file should be attached to the message.'
    )
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def mystery_custom(self, ctx):
        if ctx.message.attachments:
            content = await ctx.message.attachments[0].read()
            weights = yaml.safe_load(content)
            await randomgame(ctx=ctx, weights=weights, weightset='custom', tournament=True,
                             spoilers="mystery", festive=False)
        else:
            raise SahasrahBotException("You must supply a valid yaml file.")

    @commands.command(
        brief='Generate a mystery game.',
        help='Generate a mystery game. Find a list of weights at https://l.synack.live/weights'
    )
    @commands.is_owner()
    async def mysterytest(self, ctx, weightset='bot_testing'):
        resp = await generate_test_game(weightset=weightset)
        await ctx.send(file=discord.File(io.StringIO(json.dumps(resp, indent=4)), filename=f"{weightset}.txt"))

    @commands.command(
        brief='Verify a game was generated by SahasrahBot.',
        help='Verify a game was generated by SahasrahBot.\nThis can be useful for checking that customizer games are not a plando or something like that if you accept viewer games as a streamer.'
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    async def verifygame(self, ctx, hash_id):
        result = await audit.get_generated_game(hash_id)
        if result:
            await ctx.send((
                f"{hash_id} was generated by SahasrahBot.\n\n"
                f"**Randomizer:** {result['randomizer']}\n"
                f"**Game Type:** {result['gentype']}\n"
                f"**Game Option:** {result['genoption']}\n\n"
                f"**Permalink:** <{result['permalink']}>"
            ))
        else:
            await ctx.send("That game was not generated by SahasrahBot.")

    @commands.command(
        brief='Get changes in retrieved game vs. baseline settings.'
    )
    @commands.is_owner()
    async def mysteryspoiler(self, ctx, hash_id):
        result = await audit.get_generated_game(hash_id)
        if not result:
            raise SahasrahBotException('That game was not generated by this bot.')
        if not result['randomizer'] == 'alttpr':
            raise SahasrahBotException('That is not an alttpr game.')
        if not result['gentype'] == 'mystery':
            raise SahasrahBotException('That is not a mystery game.')

        settings = json.loads(result['settings'])

        await ctx.send(file=discord.File(io.StringIO(json.dumps(settings, indent=4)), filename=f"{hash_id}.txt"))

    @commands.group(
        brief='Generate a festive game with randomized settings.',
        help='Generate a festive game with randomized settings. Find a list of weights at https://l.synack.live/weights'
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    @checks.restrict_command_globally('FestiveMode')
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    async def festiverandom(self, ctx, weightset='weighted', tournament: bool = True):
        await randomgame(ctx=ctx, weightset=weightset, tournament=tournament, spoilers="off", festive=True)

    @commands.group(
        brief='Generate a festive mystery game.',
        help='Generate a festive mystery game. Find a list of weights at https://l.synack.live/weights'
    )
    @checks.restrict_to_channels_by_guild_config('AlttprGenRestrictChannels')
    @checks.restrict_command_globally('FestiveMode')
    @commands.cooldown(rate=3, per=900, type=commands.BucketType.user)
    async def festivemystery(self, ctx, weightset='weighted'):
        await randomgame(ctx=ctx, weightset=weightset, tournament=True, spoilers="mystery", festive=True)

    @commands.command(hidden=True, aliases=['festives'])
    async def festive(self, ctx):
        if await config.get(0, 'FestiveMode') == "true":
            embed = discord.Embed(
                title='Festive Randomizer Information',
                description='Latest details of any upcoming festive randomizers.',
                color=discord.Color.green()
            )
            embed.add_field(name="Christmas Festive 2019", value="https://alttpr.com/special")
        else:
            embed = discord.Embed(
                title='Festive Randomizer Information',
                description='Latest details of any upcoming festive randomizers.',
                color=discord.Color.red()
            )
            embed.set_image(
                url='https://cdn.discordapp.com/attachments/307860211333595146/654123045375442954/unknown.png')

        await ctx.send(embed=embed)

    @commands.command()
    async def alttprstats(self, ctx, raw: bool = False):
        if ctx.message.attachments:
            sram = await ctx.message.attachments[0].read()
            parsed = parse_sram(sram)
            if raw:
                await ctx.send(
                    file=discord.File(
                        io.StringIO(json.dumps(parsed, indent=4)),
                        filename=f"stats_{parsed['meta'].get('filename', 'alttpr').strip()}.txt"
                    )
                )
            else:
                embed = discord.Embed(
                    title=f"ALTTPR Stats for \"{parsed['meta'].get('filename', '').strip()}\"",
                    description=f"Collection Rate {parsed['stats'].get('collection rate')}",
                    color=discord.Color.blue()
                )
                embed.add_field(
                    name="Time",
                    value=(
                        f"Total Time: {parsed['stats'].get('total time', None)}\n"
                        f"Lag Time: {parsed['stats'].get('lag time', None)}\n"
                        f"Menu Time: {parsed['stats'].get('menu time', None)}\n\n"
                        f"First Sword: {parsed['stats'].get('first sword', None)}\n"
                        f"Flute Found: {parsed['stats'].get('flute found', None)}\n"
                        f"Mirror Found: {parsed['stats'].get('mirror found', None)}\n"
                        f"Boots Found: {parsed['stats'].get('boots found', None)}\n"
                    ),
                    inline=False
                )
                embed.add_field(
                    name="Important Stats",
                    value=(
                        f"Bonks: {parsed['stats'].get('bonks', None)}\n"
                        f"Deaths: {parsed['stats'].get('deaths', None)}\n"
                        f"Revivals: {parsed['stats'].get('faerie revivals', None)}\n"
                        f"Overworld Mirrors: {parsed['stats'].get('overworld mirrors', None)}\n"
                        f"Rupees Spent: {parsed['stats'].get('rupees spent', None)}\n"
                        f"Save and Quits: {parsed['stats'].get('save and quits', None)}\n"
                        f"Screen Transitions: {parsed['stats'].get('screen transitions', None)}\n"
                        f"Times Fluted: {parsed['stats'].get('times fluted', None)}\n"
                        f"Underworld Mirrors: {parsed['stats'].get('underworld mirrors', None)}\n"
                    )
                )
                embed.add_field(
                    name="Misc Stats",
                    value=(
                        f"Swordless Bosses: {parsed['stats'].get('swordless bosses', None)}\n"
                        f"Fighter Sword Bosses: {parsed['stats'].get('fighter sword bosses', None)}\n"
                        f"Master Sword Bosses: {parsed['stats'].get('master sword bosses', None)}\n"
                        f"Tempered Sword Bosses: {parsed['stats'].get('tempered sword bosses', None)}\n"
                        f"Golden Sword Bosses: {parsed['stats'].get('golden sword bosses', None)}\n\n"
                        f"Heart Containers: {parsed['stats'].get('heart containers', None)}\n"
                        f"Heart Pieces: {parsed['stats'].get('heart pieces', None)}\n"
                        f"Mail Upgrade: {parsed['stats'].get('mails', None)}\n"
                        f"Bottles: {parsed['equipment'].get('bottles', None)}\n"
                        f"Silver Arrows: {parsed['equipment'].get('silver arrows', None)}\n"
                    )
                )
                if parsed.get('hash id', 'none') != 'none':
                    seed = await alttpr(hash_id=parsed.get('hash id', 'none'))
                    embed.add_field(name='File Select Code', value=seed.build_file_select_code(
                        emojis=ctx.bot.emojis
                    ), inline=False)
                    embed.add_field(name='Permalink', value=seed.url, inline=False)

                await ctx.send(embed=embed)
        else:
            raise SahasrahBotException("You must attach an SRAM file.")

    @commands.command(
        brief='Make a SahasrahBot preset file from a customizer save.',
        help=(
            'Take a customizer settings save and create a SahasrahBot preset file from it.\n'
            'This can then be fed into SahasrahBot using the "$preset custom" command.\n\n'
        )
    )
    async def convertcustomizer(self, ctx):
        if ctx.message.attachments:
            content = await ctx.message.attachments[0].read()
            customizer_save = json.loads(content)
            settings = pyz3r.customizer.convert2settings(customizer_save)
            preset_dict = {
                'customizer': True,
                'goal_name': "REPLACE WITH SRL GOAL STRING",
                'randomizer': 'alttpr',
                'settings': settings
            }
            await ctx.send(
                file=discord.File(
                    io.StringIO(yaml.dump(preset_dict)),
                    filename="output.yaml"
                )
            )
        else:
            raise SahasrahBotException("You must supply a valid customizer save file.")


async def randomgame(ctx, weightset=None, weights=None, tournament=True, spoilers="off", festive=False):
    seed = await generate_random_game(
        weightset=weightset,
        weights=weights,
        tournament=tournament,
        spoilers=spoilers,
        festive=festive
    )

    embed = await seed.embed(emojis=ctx.bot.emojis, name="Mystery Game")
    await ctx.send(embed=embed)


async def get_customizer_json(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            text = await resp.read()

    return json.loads(text)


def setup(bot):
    bot.add_cog(AlttprGen(bot))
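# --- Illustrative sketch (not part of the cog above). ---
# The YAML consumed by "$preset custom" mirrors the preset_dict built in
# convertcustomizer; the goal string and the settings payload here are
# placeholders, since the real settings depend on the customizer save.
#
# import yaml
# preset_dict = {
#     'customizer': True,
#     'goal_name': 'defeat ganon',  # illustrative goal string
#     'randomizer': 'alttpr',
#     'settings': {},               # customizer settings payload goes here
# }
# print(yaml.dump(preset_dict))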
from __future__ import annotations
import typing as T
import tempfile
import importlib.resources
from pathlib import Path
import subprocess

from . import cmake_exe


def find_library(lib_name: str, lib_path: list[str], env: T.Mapping[str, str]) -> bool:
    """
    check if library exists with CMake

    lib_name must have the appropriate upper and lower case letter as would be used directly in CMake.
    """
    cmake = cmake_exe()

    with importlib.resources.path("gemini3d.cmake", "FindLAPACK.cmake") as f:
        mod_path = Path(f).parent

    cmake_template = """
cmake_minimum_required(VERSION 3.15)
project(dummy LANGUAGES C Fortran)
"""

    if mod_path.is_dir():
        cmake_template += f'list(APPEND CMAKE_MODULE_PATH "{mod_path.as_posix()}")\n'

    cmake_template += f"find_package({lib_name} REQUIRED)\n"

    build_dir = f"find-{lib_name.split(" ", 1)[0]}"

    # not context_manager to avoid Windows PermissionError on context exit for Git subdirs
    d = tempfile.TemporaryDirectory()
    r = Path(d.name)
    (r / "CMakeLists.txt").write_text(cmake_template)

    cmd = [cmake, "-S", str(r), "-B", str(r / build_dir)] + lib_path
    # use cwd= to avoid spilling temporary files into current directory if ancient CMake used
    # also avoids bugs if there is a CMakeLists.txt in the current directory
    ret = subprocess.run(cmd, env=env, cwd=r)

    try:
        d.cleanup()
    except PermissionError:
        pass

    return ret.returncode == 0
from __future__ import annotations import typing as T import tempfile import importlib.resources from pathlib import Path import subprocess from . import cmake_exe def find_library(lib_name: str, lib_path: list[str], env: T.Mapping[str, str]) -> bool: """ check if library exists with CMake lib_name must have the appropriate upper and lower case letter as would be used directly in CMake. """ cmake = cmake_exe() with importlib.resources.path("gemini3d.cmake", "FindLAPACK.cmake") as f: mod_path = Path(f).parent cmake_template = """ cmake_minimum_required(VERSION 3.15) project(dummy LANGUAGES C Fortran) """ if mod_path.is_dir(): cmake_template += f'list(APPEND CMAKE_MODULE_PATH "{mod_path.as_posix()}")\n' cmake_template += f"find_package({lib_name} REQUIRED)\n" build_dir = f"find-{lib_name.split(' ', 1)[0]}" # not context_manager to avoid Windows PermissionError on context exit for Git subdirs d = tempfile.TemporaryDirectory() r = Path(d.name) (r / "CMakeLists.txt").write_text(cmake_template) cmd = [cmake, "-S", str(r), "-B", str(r / build_dir)] + lib_path # use cwd= to avoid spilling temporary files into current directory if ancient CMake used # also avoids bugs if there is a CMakeLists.txt in the current directory ret = subprocess.run(cmd, env=env, cwd=r) try: d.cleanup() except PermissionError: pass return ret.returncode == 0
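# --- Usage sketch (not part of the original module): one way find_library
# might be called. Assumes CMake plus C and Fortran compilers are installed;
# the "-DLAPACK_ROOT=/opt/lapack" hint mentioned below is a hypothetical path.
def demo_find_lapack() -> bool:
    import os

    # An empty lib_path lets CMake search its default locations; a define such
    # as "-DLAPACK_ROOT=/opt/lapack" could be appended to narrow the search.
    return find_library("LAPACK", [], os.environ.copy())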
import os import requests from discord.ext import commands from cogs.bot import send_embed class ESV(commands.Cog): """listens for the ESV Bible commands.""" @commands.command() async def esv(self, ctx, *, passage: str = None): if passage is None: return # Bible verse numbers are wrapped with '[]' in the api # replace brackets with discord's single line code block. description = get_esv_verse(passage).replace('[', '`').replace(']', '`') await send_embed(ctx, title='🙏🏻 English Standard Version', text=description) def get_esv_verse(passage): """looks up the given passage in English Standard Version using an ESV API. for the api's documentation and optional parameters: https://api.esv.org/docs/passage-text/ the api returns the passage in a .json format. the .json file will be converted to a str and then printed to the user. Parameters ---------- :param str passage: the book, chapter, and verse to look up. :return: a string of the given passage. """ url = 'https://api.esv.org/v3/passage/text' params = { 'q': passage, 'include-footnotes': False, # do not display footnotes. 'include-short-copyright': False # do not display (ESV) } headers = { 'Authorization': f'Token {os.getenv("ESV_API_KEY")}' } # get passage from api. response = requests.get(url, params=params, headers=headers).json()['passages'] return response[0] if response else 'passage not found.' # connect this cog to bot. def setup(bot): bot.add_cog(ESV(bot))
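# --- Usage sketch (not part of the original cog): get_esv_verse can be tried
# outside Discord, assuming the ESV_API_KEY environment variable holds a valid
# api.esv.org token.
def demo_esv_lookup():
    # 'John 11:35' is just a short sample passage.
    print(get_esv_verse('John 11:35'))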
#!/usr/bin/env python from __future__ import print_function import argparse import csv import json import re import sys import time import closeio_api import unidecode from closeio_api import Client as CloseIO_API from closeio_api.utils import count_lines, title_case, uncamel from progressbar import ProgressBar from progressbar.widgets import ETA, Bar, FileTransferSpeed, Percentage from requests.exceptions import ConnectionError parser = argparse.ArgumentParser(description='Import leads from CSV file') parser.add_argument('--api-key', '-k', required=True, help='API Key') parser.add_argument( '--skip_duplicates', action='store_true', help='Skip leads that are already present in Close.io (determined by company name).', ) parser.add_argument( '--no_grouping', action='store_true', help='Turn off the default group-by-company behavior.', ) parser.add_argument('file', help='Path to the csv file') args = parser.parse_args() reader = csv.DictReader(open(args.file)) headers = reader.fieldnames import_count = count_lines(args.file) # may have no trailing newline cnt = success_cnt = 0 def warning(*objs): print("WARNING: ", *objs, file=sys.stderr) def slugify(str, separator='_'): str = unidecode.unidecode(str).lower().strip() return re.sub(r'\W+', separator, str).strip(separator) # Look for headers/columns that match these, case-insensitive. All other headers will be treated as custom fields. expected_headers = ( 'company', # multiple contacts will be grouped if company names match 'url', 'status', 'contact', # name of contact 'title', 'email', 'phone', # recommended to start with "+" followed by country code (e.g., +1 650 555 1234) 'mobile_phone', 'fax', 'address', 'address_1', # if address is missing, address_1 and address_2 will be combined to create it. 'address_2', # if address is missing, address_1 and address_2 will be combined to create it. 
'city', 'state', 'zip', 'country', ) # Remove trailing empty column headers while not len(headers[-1].strip()): del headers[-1] # Check for duplicated column names if len(set(headers)) != len(headers): raise Exception('Cannot have duplicate column header names') # Check for duplicates after normalization normalized_headers = [slugify(col) for col in headers] if len(set(normalized_headers)) != len(normalized_headers): raise Exception( 'After column header names were normalized there were duplicate column header names' ) # build a map of header names -> index in actual header row header_indices = { col: i for (i, col) in enumerate(normalized_headers) } # normalized columns as keys header_indices.update( {col: i for (i, col) in enumerate(headers)} ) # add in original column names as keys expected_headers = [ col for col in normalized_headers if col in expected_headers ] custom_headers = list( set(normalized_headers) - set(expected_headers) ) # non-recognized fields in slug-ed format # restore original version (capitalization) to custom fields custom_headers = [ headers[header_indices[normalized_col]] for normalized_col in custom_headers ] print("\nRecognized these column names:") print(f'> {", ".join(expected_headers)}') if len(custom_headers): print( "\nThe following column names weren't recognized, and will be imported as custom fields:" ) print(f'> {", ".join(custom_headers)}') print('') def lead_from_row(row): row = { column_name: column_value.strip() for column_name, column_value in row.items() } # strip unnecessary white spaces # check if the row isn't empty has_data = { column_name: column_value for column_name, column_value in row.items() if column_value } if not has_data: return None lead = {'name': row['company'], 'contacts': [], 'custom': {}} if 'url' in row: lead['url'] = row['url'] if 'status' in row: lead['status'] = row['status'] if lead.get('url') and '://' not in lead['url']: lead['url'] = 'http://%s' % lead['url'] # custom fields for field in custom_headers: if field in row: lead['custom'][field] = row[field] # address address = {} if 'address' in row: address['address'] = row['address'] elif 'address_1' in row or 'address_2' in row: address['address'] = f'{row["address_1"]} {row["address_2"]}'.strip() if 'city' in row: address['city'] = title_case(row['city']) if 'state' in row: address['state'] = row['state'] if 'zip' in row: address['zipcode'] = row['zip'] if 'country' in row: address['country'] = row['country'] if len(address): lead['addresses'] = [address] # contact contact = {} if 'contact' in row: contact['name'] = uncamel(row['contact']) if 'title' in row: contact['title'] = row['title'] phones = [] if 'phone' in row: phones.append({'phone': row['phone'], 'type': 'office'}) if 'mobile_phone' in row: phones.append({'phone': row['mobile_phone'], 'type': 'mobile'}) if 'fax' in row: phones.append({'phone': row['fax'], 'type': 'fax'}) if len(phones): contact['phones'] = phones emails = [] if 'email' in row: emails.append({'email': row['email'], 'type': 'office'}) if len(emails): contact['emails'] = emails if len(contact): lead['contacts'] = [contact] return lead # Create leads, grouped by company name unique_leads = {} for i, row in enumerate(reader): lead = lead_from_row(row) if not lead: continue if args.no_grouping: grouper = 'row-num-%s' % i else: # group by lead Name (company) if possible, otherwise put each row in its own lead grouper = lead['name'] if lead['name'] else ('row-num-%s' % i) if grouper not in unique_leads: unique_leads[grouper] = lead elif 
lead['contacts'] not in unique_leads[grouper]['contacts']: unique_leads[grouper]['contacts'].extend(lead['contacts']) print( f'Found {len(unique_leads)} leads (grouped by company) from {import_count} contacts.' ) print('\nHere is a sample lead (last row):') print(json.dumps(unique_leads[grouper], indent=4)) print('\nAre you sure you want to continue? (y/n) ') if input('') != 'y': sys.exit() ############################################################################## api = CloseIO_API(args.api_key) progress_widgets = [ 'Importing %d rows: ' % import_count, Percentage(), ' ', Bar(), ' ', ETA(), ' ', FileTransferSpeed(), ] pbar = ProgressBar(widgets=progress_widgets, maxval=import_count).start() dupes_cnt = 0 for key, val in unique_leads.items(): retries = 5 # check if it's a duplicate dupe = False if args.skip_duplicates and val.get('name'): # get the org id necessary for search org_id = api.get('api_key')['data'][0]['organization_id'] # get all the search results for given lead name search_results = [] filters = { 'organization_id': org_id, 'query': 'name:"%s"' % key, } has_more = True skip = 0 while has_more: filters['_skip'] = skip resp = api.get('lead', params=filters) results = resp['data'] search_results.extend(results) has_more = resp['has_more'] skip += len(results) for result in search_results: if result['display_name'] == val['name']: dupe = True break while retries > 0: if dupe: dupes_cnt += 1 warning('Duplicate - not importing: %s' % val['name']) break try: retries -= 1 api.post('lead', val) retries = 0 success_cnt += 1 except closeio_api.APIError as err: warning('An error occurred while saving "%s"' % key) warning(err) retries = 0 except ConnectionError as e: warning('Connection error occurred, retrying... (%d/5)' % retries) if retries == 0: raise time.sleep(2) cnt += 1 if cnt > import_count: warning('Warning: count overflow') cnt = import_count pbar.update(cnt) pbar.finish() print(f'Successful responses: {success_cnt} of {len(unique_leads)}') if args.skip_duplicates: print(f'Duplicates: {dupes_cnt}')
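# --- Behavior sketch (not part of the original script): how slugify normalizes
# incoming column headers before they are matched against expected_headers.
# The sample header names are hypothetical.
def demo_slugify():
    assert slugify('Mobile Phone') == 'mobile_phone'
    assert slugify(' Zip/Postal Code ') == 'zip_postal_code'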
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import asyncio import io import logging import os import warnings import weakref from datetime import datetime, timedelta from glob import has_magic from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, ) from azure.datalake.store import AzureDLFileSystem, lib from azure.datalake.store.core import AzureDLFile, AzureDLPath from azure.storage.blob import BlobSasPermissions, generate_blob_sas from azure.storage.blob._models import BlobBlock, BlobProperties, BlobType from azure.storage.blob._shared.base_client import create_configuration from azure.storage.blob.aio import BlobServiceClient as AIOBlobServiceClient from azure.storage.blob.aio._list_blobs_helper import BlobPrefix from fsspec import AbstractFileSystem from fsspec.asyn import AsyncFileSystem, get_loop, get_running_loop, sync, sync_wrapper from fsspec.spec import AbstractBufferedFile from fsspec.utils import infer_storage_options, tokenize from .utils import ( close_container_client, close_service_client, filter_blobs, get_blob_metadata, ) logger = logging.getLogger(__name__) FORWARDED_BLOB_PROPERTIES = [ "metadata", "creation_time", "deleted", "deleted_time", "last_modified", "content_time", "content_settings", "remaining_retention_days", "archive_status", "last_accessed_on", "etag", "tags", "tag_count", ] _ROOT_PATH = "/" class AzureDatalakeFileSystem(AbstractFileSystem): """ Access Azure Datalake Gen1 as if it were a file system. This exposes a filesystem-like API on top of Azure Datalake Storage Parameters ---------- tenant_id: string Azure tenant, also known as the subscription id client_id: string The username or service principal id client_secret: string The access key store_name: string (optional) The name of the datalake account being accessed. Should be inferred from the urlpath if using with Dask read_xxx and to_xxx methods. Examples -------- >>> adl = AzureDatalakeFileSystem(tenant_id="xxxx", client_id="xxxx", ... client_secret="xxxx") >>> adl.ls('') Sharded Parquet & CSV files can be read as >>> storage_options = dict(tenant_id=TENANT_ID, client_id=CLIENT_ID, ... client_secret=CLIENT_SECRET) # doctest: +SKIP >>> ddf = dd.read_parquet('adl://store_name/folder/filename.parquet', ... storage_options=storage_options) # doctest: +SKIP >>> ddf = dd.read_csv('adl://store_name/folder/*.csv', ... storage_options=storage_options) # doctest: +SKIP Sharded Parquet and CSV files can be written as >>> ddf.to_parquet("adl://store_name/folder/filename.parquet", ... storage_options=storage_options) # doctest: +SKIP >>> ddf.to_csv('adl://store_name/folder/*.csv', ...
storage_options=storage_options) # doctest: +SKIP """ protocol = "adl" def __init__(self, tenant_id, client_id, client_secret, store_name): super().__init__() self.tenant_id = tenant_id self.client_id = client_id self.client_secret = client_secret self.store_name = store_name self.do_connect() @staticmethod def _get_kwargs_from_urls(paths): """Get the store_name from the urlpath and pass to storage_options""" ops = infer_storage_options(paths) out = {} if ops.get("host", None): out["store_name"] = ops["host"] return out @classmethod def _strip_protocol(cls, path): ops = infer_storage_options(path) return ops["path"] def do_connect(self): """Establish connection object.""" token = lib.auth( tenant_id=self.tenant_id, client_id=self.client_id, client_secret=self.client_secret, ) self.azure_fs = AzureDLFileSystem(token=token, store_name=self.store_name) def ls(self, path, detail=False, invalidate_cache=True, **kwargs): files = self.azure_fs.ls( path=path, detail=detail, invalidate_cache=invalidate_cache ) for file in (file for file in files if type(file) is dict): if "type" in file: file["type"] = file["type"].lower() if "length" in file: file["size"] = file["length"] return files def info(self, path, invalidate_cache=True, expected_error_code=404, **kwargs): info = self.azure_fs.info( path=path, invalidate_cache=invalidate_cache, expected_error_code=expected_error_code, ) info["size"] = info["length"] """Azure FS uses upper case type values but fsspec is expecting lower case""" info["type"] = info["type"].lower() return info def _trim_filename(self, fn, **kwargs): """Determine what kind of filestore this is and return the path""" so = infer_storage_options(fn) fileparts = so["path"] return fileparts def glob(self, path, details=False, invalidate_cache=True, **kwargs): """For a template path, return matching files""" adlpaths = self._trim_filename(path) filepaths = self.azure_fs.glob( adlpaths, details=details, invalidate_cache=invalidate_cache ) return filepaths def isdir(self, path, **kwargs): """Is this entry directory-like?""" try: return self.info(path)["type"].lower() == "directory" except FileNotFoundError: return False def isfile(self, path, **kwargs): """Is this entry file-like?""" try: return self.azure_fs.info(path)["type"].lower() == "file" except Exception: return False def _open( self, path, mode="rb", block_size=None, autocommit=True, cache_options: dict = {}, **kwargs, ): return AzureDatalakeFile(self, path, mode=mode) def read_block(self, fn, offset, length, delimiter=None, **kwargs): return self.azure_fs.read_block(fn, offset, length, delimiter) def ukey(self, path): return tokenize(self.info(path)["modificationTime"]) def size(self, path): return self.info(path)["length"] def rmdir(self, path): """Remove a directory, if empty""" self.azure_fs.rmdir(path) def rm_file(self, path): """Delete a file""" self.azure_fs.rm(path) def __getstate__(self): dic = self.__dict__.copy() logger.debug("Serialize with state: %s", dic) return dic def __setstate__(self, state): logger.debug("De-serialize with state: %s", state) self.__dict__.update(state) self.do_connect() class AzureDatalakeFile(AzureDLFile): # TODO: refactor this. I suspect we actually want to compose an # AbstractBufferedFile with an AzureDLFile.
def __init__( self, fs, path, mode="rb", autocommit=True, block_size=2**25, cache_type="bytes", cache_options=None, *, delimiter=None, **kwargs, ): super().__init__( azure=fs.azure_fs, path=AzureDLPath(path), mode=mode, blocksize=block_size, delimiter=delimiter, ) self.fs = fs self.path = AzureDLPath(path) self.mode = mode def seek(self, loc: int, whence: int = 0, **kwargs): """Set current file location Parameters ---------- loc: int byte location whence: {0, 1, 2} from start of file, current location or end of file, resp. """ loc = int(loc) if not self.mode == "rb": raise ValueError("Seek only available in read mode") if whence == 0: nloc = loc elif whence == 1: nloc = self.loc + loc elif whence == 2: nloc = self.size + loc else: raise ValueError("invalid whence (%s, should be 0, 1 or 2)" % whence) if nloc < 0: raise ValueError("Seek before start of file") self.loc = nloc return self.loc # https://github.com/Azure/azure-sdk-for-python/issues/11419#issuecomment-628143480 def make_callback(key, callback): if callback is None: return None sent_total = False def wrapper(response): nonlocal sent_total current = response.context.get(key) total = response.context["data_stream_total"] if not sent_total: callback.set_size(total) callback.absolute_update(current) return wrapper class AzureBlobFileSystem(AsyncFileSystem): """ Access Azure Datalake Gen2 and Azure Storage as if it were a file system using Multiprotocol Access Parameters ---------- account_name: str The storage account name. This is used to authenticate requests signed with an account key and to construct the storage endpoint. It is required unless a connection string is given, or if a custom domain is used with anonymous authentication. account_key: str The storage account key. This is used for shared key authentication. If any of account key, sas token or client_id is specified, anonymous access will be used. sas_token: str A shared access signature token to use to authenticate requests instead of the account key. If account key and sas token are both specified, account key will be used to sign. If any of account key, sas token or client_id are specified, anonymous access will be used. request_session: Session The session object to use for http requests. connection_string: str If specified, this will override all other parameters besides request session. See http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ for the connection string format. socket_timeout: int If specified, this will override the default socket timeout. The timeout specified is in seconds. See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. credential: TokenCredential or SAS token The credentials with which to authenticate. Optional if the account URL already has a SAS token. Can include an instance of TokenCredential class from azure.identity blocksize: int The block size to use for download/upload operations. Defaults to the value of ``BlockBlobService.MAX_BLOCK_SIZE`` client_id: str Client ID to use when authenticating using an AD Service Principal client/secret. client_secret: str Client secret to use when authenticating using an AD Service Principal client/secret. tenant_id: str Tenant ID to use when authenticating using an AD Service Principal client/secret. default_fill_cache: bool = True Whether to use cache filling with open by default default_cache_type: string ('bytes') If given, the default cache_type value used for "open()". Set to None if no caching is desired.
Docs in fsspec Pass on to fsspec: skip_instance_cache: to control reuse of instances use_listings_cache, listings_expiry_time, max_paths: to control reuse of directory listings Examples -------- Authentication with an account_key >>> abfs = AzureBlobFileSystem(account_name="XXXX", account_key="XXXX") >>> abfs.ls('') Authentication with an Azure ServicePrincipal >>> abfs = AzureBlobFileSystem(account_name="XXXX", tenant_id=TENANT_ID, ... client_id=CLIENT_ID, client_secret=CLIENT_SECRET) >>> abfs.ls('') Authentication with DefaultAzureCredential >>> abfs = AzureBlobFileSystem(account_name="XXXX", anon=False) >>> abfs.ls('') Read files as >>> ddf = dd.read_csv('abfs://container_name/folder/*.csv', storage_options={ ... 'account_name': ACCOUNT_NAME, 'tenant_id': TENANT_ID, 'client_id': CLIENT_ID, ... 'client_secret': CLIENT_SECRET}) ... }) Sharded Parquet & csv files can be read as: >>> ddf = dd.read_csv('abfs://container_name/folder/*.csv', storage_options={ ... 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY}) >>> ddf = dd.read_parquet('abfs://container_name/folder.parquet', storage_options={ ... 'account_name': ACCOUNT_NAME, 'account_key': ACCOUNT_KEY,}) """ protocol = "abfs" def __init__( self, account_name: str = None, account_key: str = None, connection_string: str = None, credential: str = None, sas_token: str = None, request_session=None, socket_timeout: int = None, blocksize: int = create_configuration(storage_sdk="blob").max_block_size, client_id: str = None, client_secret: str = None, tenant_id: str = None, anon: bool = True, location_mode: str = "primary", loop=None, asynchronous: bool = False, default_fill_cache: bool = True, default_cache_type: str = "bytes", **kwargs, ): super_kwargs = { k: kwargs.pop(k) for k in ["use_listings_cache", "listings_expiry_time", "max_paths"] if k in kwargs } # pass on to fsspec superclass super().__init__( asynchronous=asynchronous, loop=loop or get_loop(), **super_kwargs ) self.account_name = account_name or os.getenv("AZURE_STORAGE_ACCOUNT_NAME") self.account_key = account_key or os.getenv("AZURE_STORAGE_ACCOUNT_KEY") self.connection_string = connection_string or os.getenv( "AZURE_STORAGE_CONNECTION_STRING" ) self.sas_token = sas_token or os.getenv("AZURE_STORAGE_SAS_TOKEN") self.client_id = client_id or os.getenv("AZURE_STORAGE_CLIENT_ID") self.client_secret = client_secret or os.getenv("AZURE_STORAGE_CLIENT_SECRET") self.tenant_id = tenant_id or os.getenv("AZURE_STORAGE_TENANT_ID") self.anon = anon self.location_mode = location_mode self.credential = credential self.request_session = request_session self.socket_timeout = socket_timeout self.blocksize = blocksize self.default_fill_cache = default_fill_cache self.default_cache_type = default_cache_type if ( self.credential is None and self.account_key is None and self.sas_token is None and self.client_id is not None ): ( self.credential, self.sync_credential, ) = self._get_credential_from_service_principal() else: self.sync_credential = None self.do_connect() weakref.finalize(self, sync, self.loop, close_service_client, self) @classmethod def _strip_protocol(cls, path: str): """ Remove the protocol from the input path Parameters ---------- path: str Path to remove the protocol from Returns ------- str Returns a path without the protocol """ STORE_SUFFIX = ".dfs.core.windows.net" logger.debug(f"_strip_protocol for {path}") if not path.startswith(("abfs://", "az://", "abfss://")): path = path.lstrip("/") path = "abfs://" + path ops = infer_storage_options(path) if "username" in ops: if 
ops.get("username", None): ops["path"] = ops["username"] + ops["path"] # we need to make sure that the path retains # the format {host}/{path} # here host is the container_name elif ops.get("host", None): if ( ops["host"].count(STORE_SUFFIX) == 0 ): # no store-suffix, so this is container-name ops["path"] = ops["host"] + ops["path"] logger.debug(f"_strip_protocol({path}) = {ops}") return ops["path"] def _get_credential_from_service_principal(self): """ Create a Credential for authentication. This can include a TokenCredential client_id, client_secret and tenant_id Returns ------- Tuple of (Async Credential, Sync Credential). """ from azure.identity import ClientSecretCredential from azure.identity.aio import ( ClientSecretCredential as AIOClientSecretCredential, ) async_credential = AIOClientSecretCredential( tenant_id=self.tenant_id, client_id=self.client_id, client_secret=self.client_secret, ) sync_credential = ClientSecretCredential( tenant_id=self.tenant_id, client_id=self.client_id, client_secret=self.client_secret, ) return (async_credential, sync_credential) def _get_default_azure_credential(self, **kwargs): try: from azure.identity.aio import ( DefaultAzureCredential as AIODefaultAzureCredential, ) asyncio.get_child_watcher().attach_loop(self.loop) self.credential = AIODefaultAzureCredential() self.do_connect() except: # noqa: E722 raise ClientAuthenticationError( "No explict credentials provided. Failed with DefaultAzureCredential!" ) def do_connect(self): """Connect to the BlobServiceClient, using user-specified connection details. Tries credentials first, then connection string and finally account key Raises ------ ValueError if none of the connection details are available """ try: if self.connection_string is not None: self.service_client = AIOBlobServiceClient.from_connection_string( conn_str=self.connection_string ) elif self.account_name: self.account_url: str = ( f"https://{self.account_name}.blob.core.windows.net" ) creds = [self.credential, self.account_key] if any(creds): self.service_client = [ AIOBlobServiceClient( account_url=self.account_url, credential=cred, _location_mode=self.location_mode, ) for cred in creds if cred is not None ][0] elif self.sas_token is not None: if not self.sas_token.startswith("?"): self.sas_token = f"?{self.sas_token}" self.service_client = AIOBlobServiceClient( account_url=self.account_url + self.sas_token, credential=None, _location_mode=self.location_mode, ) elif self.anon is False: self._get_default_azure_credential() else: # Fall back to anonymous login, and assume public container self.service_client = AIOBlobServiceClient( account_url=self.account_url ) else: raise ValueError( "Must provide either a connection_string or account_name with credentials!!" ) except RuntimeError: loop = get_loop() asyncio.set_event_loop(loop) self.do_connect() except Exception as e: raise ValueError(f"unable to connect to account for {e}") def split_path(self, path, delimiter="/", return_container: bool = False, **kwargs): """ Normalize ABFS path string into bucket and key. 
Parameters ---------- path : string Input path, like `abfs://my_container/path/to/file` delimiter: string Delimiter used to split the path return_container: bool Examples -------- >>> split_path("abfs://my_container/path/to/file") ['my_container', 'path/to/file'] """ if path in ["", delimiter]: return "", "" path = self._strip_protocol(path) path = path.lstrip(delimiter) if "/" not in path: # this means path is the container_name return path, "" else: return path.split(delimiter, 1) def info(self, path, refresh=False, **kwargs): try: fetch_from_azure = (path and self._ls_from_cache(path) is None) or refresh except Exception: fetch_from_azure = True if fetch_from_azure: return sync(self.loop, self._info, path, refresh) return super().info(path) async def _info(self, path, refresh=False, **kwargs): """Give details of entry at path Returns a single dictionary, with exactly the same information as ``ls`` would with ``detail=True``. The default implementation should calls ls and could be overridden by a shortcut. kwargs are passed on to ```ls()``. Some file systems might not be able to measure the file's size, in which case, the returned dict will include ``'size': None``. Returns ------- dict with keys: name (full path in the FS), size (in bytes), type (file, directory, or something else) and other FS-specific keys. """ if refresh: invalidate_cache = True else: invalidate_cache = False path = self._strip_protocol(path) out = await self._ls( self._parent(path), invalidate_cache=invalidate_cache, **kwargs ) out = [o for o in out if o["name"].rstrip("/") == path] if out: return out[0] out = await self._ls(path, invalidate_cache=invalidate_cache, **kwargs) path = path.rstrip("/") out1 = [o for o in out if o["name"].rstrip("/") == path] if len(out1) == 1: if "size" not in out1[0]: out1[0]["size"] = None return out1[0] elif len(out1) > 1 or out: return {"name": path, "size": None, "type": "directory"} else: raise FileNotFoundError def glob(self, path, **kwargs): return sync(self.loop, self._glob, path) async def _glob(self, path, **kwargs): """ Find files by glob-matching. If the path ends with '/' and does not contain "*", it is essentially the same as ``ls(path)``, returning only files. We support ``"**"``, ``"?"`` and ``"[..]"``. kwargs are passed to ``ls``. 
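Examples
--------
Illustrative only; container and blob names here are hypothetical, and the
call is shown via the synchronous ``glob`` wrapper.

>>> abfs.glob("mycontainer/raw/*.csv")  # doctest: +SKIP
['mycontainer/raw/a.csv', 'mycontainer/raw/b.csv']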
""" import re ends = path.endswith("/") path = self._strip_protocol(path) indstar = path.find("*") if path.find("*") >= 0 else len(path) indques = path.find("?") if path.find("?") >= 0 else len(path) indbrace = path.find("[") if path.find("[") >= 0 else len(path) ind = min(indstar, indques, indbrace) detail = kwargs.pop("detail", False) if not has_magic(path): root = path depth = 1 if ends: path += "/*" elif await self._exists(path): if not detail: return [path] else: return {path: await self._info(path)} else: if not detail: return [] # glob of non-existent returns empty else: return {} elif "/" in path[:ind]: ind2 = path[:ind].rindex("/") root = path[: ind2 + 1] depth = 20 if "**" in path else path[ind2 + 1 :].count("/") + 1 else: root = "" depth = 20 if "**" in path else 1 allpaths = await self._glob_find( root, maxdepth=depth, withdirs=True, detail=True, **kwargs ) pattern = ( "^" + ( path.replace("\\", r"\\") .replace(".", r"\.") .replace("+", r"\+") .replace("//", "/") .replace("(", r"\(") .replace(")", r"\)") .replace("|", r"\|") .rstrip("/") .replace("?", ".") ) + "$" ) pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern) pattern = re.sub("[*]", "[^/]*", pattern) pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*")) out = { p: allpaths[p] for p in sorted(allpaths) if pattern.match(p.replace("//", "/").rstrip("/")) } if detail: return out else: return list(out) def ls( self, path: str, detail: bool = False, invalidate_cache: bool = False, delimiter: str = "/", return_glob: bool = False, **kwargs, ): files = sync( self.loop, self._ls, path=path, invalidate_cache=invalidate_cache, delimiter=delimiter, return_glob=return_glob, ) if detail: return files else: return list(sorted(set([f["name"] for f in files]))) async def _ls( self, path: str, invalidate_cache: bool = False, delimiter: str = "/", return_glob: bool = False, **kwargs, ): """ Create a list of blob names from a blob container Parameters ---------- path: str Path to an Azure Blob with its container name detail: bool If False, return a list of blob names, else a list of dictionaries with blob details invalidate_cache: bool If True, do not use the cache delimiter: str Delimiter used to split paths return_glob: bool """ logger.debug("abfs.ls() is searching for %s", path) target_path = path.strip("/") container, path = self.split_path(path) if invalidate_cache: self.dircache.clear() cache = {} cache.update(self.dircache) if (container in ["", ".", "*", delimiter]) and (path in ["", delimiter]): if _ROOT_PATH not in cache or invalidate_cache or return_glob: # This is the case where only the containers are being returned logger.info( "Returning a list of containers in the azure blob storage account" ) contents = self.service_client.list_containers(include_metadata=True) containers = [c async for c in contents] files = await self._details(containers) cache[_ROOT_PATH] = files self.dircache.update(cache) return cache[_ROOT_PATH] else: if target_path not in cache or invalidate_cache or return_glob: if container not in ["", delimiter]: # This is the case where the container name is passed async with self.service_client.get_container_client( container=container ) as cc: path = path.strip("/") blobs = cc.walk_blobs( include=["metadata"], name_starts_with=path ) # Check the depth that needs to be screened depth = target_path.count("/") outblobs = [] try: async for next_blob in blobs: if depth in [0, 1] and path == "": outblobs.append(next_blob) elif isinstance(next_blob, BlobProperties): if next_blob["name"].count("/") == depth: 
outblobs.append(next_blob) elif not next_blob["name"].endswith("/") and ( next_blob["name"].count("/") == (depth - 1) ): outblobs.append(next_blob) else: async for blob_ in next_blob: if isinstance(blob_, BlobProperties) or isinstance( blob_, BlobPrefix ): if blob_["name"].endswith("/"): if ( blob_["name"].rstrip("/").count("/") == depth ): outblobs.append(blob_) elif ( blob_["name"].count("/") == depth and blob_["size"] == 0 ): outblobs.append(blob_) else: pass elif blob_["name"].count("/") == (depth): outblobs.append(blob_) else: pass except ResourceNotFoundError: raise FileNotFoundError finalblobs = await self._details( outblobs, target_path=target_path, return_glob=return_glob ) if return_glob: return finalblobs finalblobs = await self._details(outblobs, target_path=target_path) if not finalblobs: if not await self._exists(target_path): raise FileNotFoundError return [] cache[target_path] = finalblobs self.dircache[target_path] = finalblobs return cache[target_path] async def _details( self, contents, delimiter="/", return_glob: bool = False, target_path="", **kwargs, ): """ Return a list of dictionaries of specifying details about the contents Parameters ---------- contents delimiter: str Delimiter used to separate containers and files return_glob: bool Returns ------- List of dicts Returns details about the contents, such as name, size and type """ output = [] for content in contents: data = { key: content[key] for key in FORWARDED_BLOB_PROPERTIES if content.has_key(key) # NOQA } if content.has_key("container"): # NOQA fname = f"{content.container}{delimiter}{content.name}" fname = fname.rstrip(delimiter) if content.has_key("size"): # NOQA data.update({"name": fname}) data.update({"size": content.size}) data.update({"type": "file"}) else: data.update({"name": fname}) data.update({"size": None}) data.update({"type": "directory"}) else: fname = f"{content.name}" data.update({"name": fname}) data.update({"size": None}) data.update({"type": "directory"}) if "metadata" in data.keys(): if data["metadata"] is not None: if ( "is_directory" in data["metadata"].keys() and data["metadata"]["is_directory"] == "true" ): data.update({"type": "directory"}) data.update({"size": None}) elif ( "is_directory" in data["metadata"].keys() and data["metadata"]["is_directory"] == "false" ): data.update({"type": "file"}) elif ( "hdi_isfolder" in data["metadata"].keys() and data["metadata"]["hdi_isfolder"] == "true" ): data.update({"type": "directory"}) data.update({"size": None}) else: pass if return_glob: data.update({"name": data["name"].rstrip("/")}) output.append(data) if target_path: if len(output) == 1 and output[0]["type"] == "file": # This handles the case where path is a file passed to ls return output output = await filter_blobs(output, target_path, delimiter) return output def find(self, path, withdirs=False, prefix="", **kwargs): return sync( self.loop, self._find, path=path, withdirs=withdirs, prefix=prefix, **kwargs ) async def _find(self, path, withdirs=False, prefix="", with_parent=False, **kwargs): """List all files below path. Like posix ``find`` command without conditions Parameters ---------- path : str The path (directory) to list from withdirs: bool Whether to include directory paths in the output. This is True when used by glob, but users usually only want files. prefix: str Only return files that match `^{path}/{prefix}` kwargs are passed to ``ls``. 
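Examples
--------
Illustrative only; the layout below is hypothetical, shown via the
synchronous ``find`` wrapper.

>>> abfs.find("mycontainer/logs", prefix="2021-")  # doctest: +SKIP
['mycontainer/logs/2021-01.log', 'mycontainer/logs/2021-02.log']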
""" full_path = self._strip_protocol(path) parent_path = full_path.strip("/") + "/" target_path = f"{parent_path}{(prefix or "").lstrip("/")}" container, path = self.split_path(target_path) async with self.service_client.get_container_client( container=container ) as container_client: blobs = container_client.list_blobs( include=["metadata"], name_starts_with=path ) files = {} dir_set = set() dirs = {} detail = kwargs.pop("detail", False) try: infos = await self._details([b async for b in blobs]) except ResourceNotFoundError: # find doesn't raise but returns [] or {} instead infos = [] for info in infos: name = info["name"] parent_dir = self._parent(name).rstrip("/") + "/" if parent_dir not in dir_set and parent_dir != parent_path.strip("/"): dir_set.add(parent_dir) dirs[parent_dir] = { "name": parent_dir, "type": "directory", "size": 0, } if info["type"] == "directory": dirs[name] = info if info["type"] == "file": files[name] = info if not infos: try: file = await self._info(full_path) except FileNotFoundError: pass else: files[file["name"]] = file if withdirs: if not with_parent: dirs.pop(target_path, None) files.update(dirs) names = sorted(files) if not detail: return names return {name: files[name] for name in names} async def _glob_find(self, path, maxdepth=None, withdirs=False, **kwargs): """List all files below path in a recusrsive manner. Like posix ``find`` command without conditions Parameters ---------- path : str maxdepth: int or None If not None, the maximum number of levels to descend withdirs: bool Whether to include directory paths in the output. This is True when used by glob, but users usually only want files. kwargs are passed to ``ls``. """ # TODO: allow equivalent of -name parameter path = self._strip_protocol(path) out = dict() detail = kwargs.pop("detail", False) async for path, dirs, files in self._async_walk( path, maxdepth, detail=True, **kwargs ): if files == []: files = {} dirs = {} if withdirs: files.update(dirs) out.update({info["name"]: info for name, info in files.items()}) if await self._isfile(path) and path not in out: # walk works on directories, but find should also return [path] # when path happens to be a file out[path] = {} names = sorted(out) if not detail: return names else: return {name: out[name] for name in names} def _walk(self, path, dirs, files): for p, d, f in zip([path], [dirs], [files]): yield p, d, f async def _async_walk(self, path: str, maxdepth=None, **kwargs): """Return all files belows path List all files, recursing into subdirectories; output is iterator-style, like ``os.walk()``. For a simple list of files, ``find()`` is available. Note that the "files" outputted will include anything that is not a directory, such as links. Parameters ---------- path: str Root to recurse into maxdepth: int Maximum recursion depth. None means limitless, but not recommended on link-based file-systems. 
**kwargs are passed to ``ls`` """ path = self._strip_protocol(path) full_dirs = {} dirs = {} files = {} detail = kwargs.pop("detail", False) try: listing = await self._ls(path, return_glob=True, **kwargs) except (FileNotFoundError, IOError): listing = [] for info in listing: # each info name must be at least [path]/part , but here # we check also for names like [path]/part/ pathname = info["name"].rstrip("/") name = pathname.rsplit("/", 1)[-1] if info["type"] == "directory" and pathname != path: # do not include "self" path full_dirs[pathname] = info dirs[name] = info elif pathname == path: # file-like with same name as give path files[""] = info else: files[name] = info if detail: for p, d, f in self._walk(path, dirs, files): yield p, d, f else: yield path, list(dirs), list(files) if maxdepth is not None: maxdepth -= 1 if maxdepth < 1: return for d in full_dirs: async for path, dirs, files in self._async_walk( d, maxdepth=maxdepth, detail=detail, **kwargs ): yield path, dirs, files async def _container_exists(self, container_name): try: async with self.service_client.get_container_client( container_name ) as client: await client.get_container_properties() except ResourceNotFoundError: return False except Exception as e: raise ValueError( f"Failed to fetch container properties for {container_name} for {e}" ) from e else: return True async def _mkdir(self, path, create_parents=True, delimiter="/", **kwargs): """ Mkdir is a no-op for creating anything except top-level containers. This aligns to the Azure Blob Filesystem flat hierarchy Parameters ---------- path: str The path to create create_parents: bool If True (default), create the Azure Container if it does not exist delimiter: str Delimiter to use when splitting the path """ fullpath = path container_name, path = self.split_path(path, delimiter=delimiter) container_exists = await self._container_exists(container_name) if not create_parents and not container_exists: raise PermissionError( "Azure Container does not exist. Set create_parents=True to create!!" ) if container_exists and not kwargs.get("exist_ok", True): raise FileExistsError( f"Cannot overwrite existing Azure container -- {container_name} already exists." ) if not container_exists: try: await self.service_client.create_container(container_name) self.invalidate_cache(_ROOT_PATH) except Exception as e: raise ValueError( f"Proposed container_name of {container_name} does not meet Azure requirements with error {e}!" ) from e self.invalidate_cache(self._parent(fullpath)) mkdir = sync_wrapper(_mkdir) def makedir(self, path, exist_ok=False): """ Create directory entry at path Parameters ---------- path: str The path to create delimiter: str Delimiter to use when splitting the path exist_ok: bool If False (default), raise an error if the directory already exists. """ try: self.mkdir(path, create_parents=True, exist_ok=exist_ok) except FileExistsError: if exist_ok: pass else: raise async def _rm(self, path, recursive=False, maxdepth=None, **kwargs): """Delete files. Parameters ---------- path: str or list of str File(s) to delete. recursive: bool If file(s) are directories, recursively delete contents and then also remove the directory maxdepth: int or None Depth to pass to walk for finding files to delete, if recursive. If None, there will be no limit and infinite recursion may be possible. 
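Examples
--------
Hypothetical path; shown via the synchronous ``rm`` wrapper.

>>> abfs.rm("mycontainer/tmp", recursive=True)  # doctest: +SKIP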
""" path = await self._expand_path( path, recursive=recursive, maxdepth=maxdepth, with_parent=True ) for p in reversed(path): await self._rm_file(p) self.invalidate_cache() rm = sync_wrapper(_rm) async def _rm_file(self, path, delimiter="/", **kwargs): """ Delete a given file Parameters ---------- path: str Path to file to delete delimiter: str Delimiter to use when splitting the path """ try: kind = await self._info(path) container_name, path = self.split_path(path, delimiter=delimiter) kind = kind["type"] if path != "": async with self.service_client.get_container_client( container=container_name ) as cc: await cc.delete_blob(path.rstrip(delimiter)) elif kind == "directory": await self._rmdir(container_name) else: raise RuntimeError(f"Unable to remove {path}") except ResourceNotFoundError: pass except FileNotFoundError: pass except Exception as e: raise RuntimeError(f"Failed to remove {path} for {e}") self.invalidate_cache(self._parent(path)) sync_wrapper(_rm_file) def rmdir(self, path: str, delimiter="/", **kwargs): sync(self.loop, self._rmdir, path, delimiter=delimiter, **kwargs) async def _rmdir(self, path: str, delimiter="/", **kwargs): """ Remove a directory, if empty Parameters ---------- path: str Path of directory to remove delimiter: str Delimiter to use when splitting the path """ container_name, path = self.split_path(path, delimiter=delimiter) container_exists = await self._container_exists(container_name) if container_exists and not path: await self.service_client.delete_container(container_name) self.invalidate_cache(_ROOT_PATH) def size(self, path): return sync(self.loop, self._size, path) async def _size(self, path): """Size in bytes of file""" res = await self._info(path) size = res.get("size", None) return size def isfile(self, path): return sync(self.loop, self._isfile, path) async def _isfile(self, path): """Is this entry file-like?""" try: path_ = path.split("/")[:-1] path_ = "/".join([p for p in path_]) if self.dircache[path_]: for fp in self.dircache[path_]: if fp["name"] == path and fp["type"] == "file": return True except KeyError: pass except FileNotFoundError: pass try: container_name, path = self.split_path(path) if not path: # A container can not be a file return False else: try: async with self.service_client.get_blob_client( container_name, path ) as bc: props = await bc.get_blob_properties() if props["metadata"]["is_directory"] == "false": return True except ResourceNotFoundError: return False except KeyError: details = await self._details([props]) return details[0]["type"] == "file" except: # noqa: E722 return False def isdir(self, path): return sync(self.loop, self._isdir, path) async def _isdir(self, path): """Is this entry directory-like?""" if path in self.dircache: for fp in self.dircache[path]: # Files will contain themselves in the cache, but # a directory can not contain itself if fp["name"] != path: return True try: container_name, path_ = self.split_path(path) if not path_: return await self._container_exists(container_name) else: if await self._exists(path) and not await self._isfile(path): return True else: return False except IOError: return False def exists(self, path): return sync(self.loop, self._exists, path) async def _exists(self, path): """Is there a file at the given path""" try: if self._ls_from_cache(path): return True except FileNotFoundError: pass except KeyError: pass container_name, path = self.split_path(path) if not path: if container_name: return await self._container_exists(container_name) else: # Empty paths exist by 
definition return True async with self.service_client.get_blob_client(container_name, path) as bc: if await bc.exists(): return True dir_path = path.rstrip("/") + "/" async with self.service_client.get_container_client( container=container_name ) as container_client: async for blob in container_client.list_blobs( results_per_page=1, name_starts_with=dir_path ): return True else: return False async def _pipe_file(self, path, value, overwrite=True, **kwargs): """Set the bytes of given file""" container_name, path = self.split_path(path) async with self.service_client.get_blob_client( container=container_name, blob=path ) as bc: result = await bc.upload_blob( data=value, overwrite=overwrite, metadata={"is_directory": "false"} ) self.invalidate_cache(self._parent(path)) return result pipe_file = sync_wrapper(_pipe_file) async def _cat_file(self, path, start=None, end=None, **kwargs): path = self._strip_protocol(path) if end is not None: start = start or 0 # download_blob requires start if length is provided. length = end - start else: length = None container_name, path = self.split_path(path) async with self.service_client.get_blob_client( container=container_name, blob=path ) as bc: try: stream = await bc.download_blob(offset=start, length=length) except ResourceNotFoundError as e: raise FileNotFoundError from e result = await stream.readall() return result def cat(self, path, recursive=False, on_error="raise", **kwargs): """Fetch (potentially multiple) paths' contents Returns a dict of {path: contents} if there are multiple paths or the path has been otherwise expanded on_error : "raise", "omit", "return" If raise, an underlying exception will be raised (converted to KeyError if the type is in self.missing_exceptions); if omit, keys with exception will simply not be included in the output; if "return", all keys are included in the output, but the value will be bytes or an exception instance. """ paths = self.expand_path(path, recursive=recursive) if ( len(paths) > 1 or isinstance(path, list) or paths[0] != self._strip_protocol(path) ): out = {} for path in paths: try: out[path] = self.cat_file(path, **kwargs) except Exception as e: if on_error == "raise": raise if on_error == "return": out[path] = e return out else: return self.cat_file(paths[0]) def url(self, path, expires=3600, **kwargs): return sync(self.loop, self._url, path, expires, **kwargs) async def _url(self, path, expires=3600, **kwargs): """Generate presigned URL to access path by HTTP Parameters ---------- path : string the key path we are interested in expires : int the number of seconds this signature will be good for. 
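Examples
--------
Hypothetical blob and account; the returned link embeds a SAS token that is
valid for ``expires`` seconds.

>>> abfs.url("mycontainer/data.csv", expires=600)  # doctest: +SKIP
'https://myaccount.blob.core.windows.net/mycontainer/data.csv?<sas-token>'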
""" container_name, blob = self.split_path(path) sas_token = generate_blob_sas( account_name=self.account_name, container_name=container_name, blob_name=blob, account_key=self.account_key, permission=BlobSasPermissions(read=True), expiry=datetime.utcnow() + timedelta(seconds=expires), ) async with self.service_client.get_blob_client(container_name, blob) as bc: url = f"{bc.url}?{sas_token}" return url def expand_path(self, path, recursive=False, maxdepth=None): return sync(self.loop, self._expand_path, path, recursive, maxdepth) async def _expand_path(self, path, recursive=False, maxdepth=None, **kwargs): """Turn one or more globs or directories into a list of all matching files""" with_parent = kwargs.get( "with_parent", False ) # Sets whether to return the parent dir if isinstance(path, list): path = [f"{p.strip("/")}" for p in path if not p.endswith("*")] else: if not path.endswith("*"): path = f"{path.strip("/")}" if isinstance(path, str): out = await self._expand_path( [path], recursive, maxdepth, with_parent=with_parent ) else: out = set() path = [self._strip_protocol(p) for p in path] for p in path: if has_magic(p): bit = set(await self._glob(p)) out |= bit if recursive: bit2 = set(await self._expand_path(p)) out |= bit2 continue elif recursive: rec = set( await self._find(p, withdirs=True, with_parent=with_parent) ) out |= rec if p not in out and ( recursive is False or await self._exists(p) or await self._exists(p.rstrip("/")) ): if not await self._exists(p): # This is to verify that we don't miss files p = p.rstrip("/") if not await self._exists(p): continue out.add(p) if not out: raise FileNotFoundError return list(sorted(out)) async def _put_file( self, lpath, rpath, delimiter="/", overwrite=False, callback=None, **kwargws ): """ Copy single file to remote :param lpath: Path to local file :param rpath: Path to remote file :param delimitier: Filepath delimiter :param overwrite: Boolean (False). 
    async def _put_file(
        self, lpath, rpath, delimiter="/", overwrite=False, callback=None, **kwargs
    ):
        """
        Copy single file to remote

        :param lpath: Path to local file
        :param rpath: Path to remote file
        :param delimiter: Filepath delimiter
        :param overwrite: Boolean (False). If True, overwrite the existing file present
        """
        container_name, path = self.split_path(rpath, delimiter=delimiter)
        if os.path.isdir(lpath):
            self.makedirs(rpath, exist_ok=True)
        else:
            try:
                with open(lpath, "rb") as f1:
                    async with self.service_client.get_blob_client(
                        container_name, path
                    ) as bc:
                        await bc.upload_blob(
                            f1,
                            overwrite=overwrite,
                            metadata={"is_directory": "false"},
                            raw_response_hook=make_callback(
                                "upload_stream_current", callback
                            ),
                        )
                self.invalidate_cache()
            except ResourceExistsError:
                raise FileExistsError("File already exists!")
            except ResourceNotFoundError:
                if not await self._exists(container_name):
                    raise FileNotFoundError("Container does not exist.")
                await self._put_file(lpath, rpath, delimiter, overwrite)
                self.invalidate_cache()

    put_file = sync_wrapper(_put_file)

    async def _cp_file(self, path1, path2, **kwargs):
        """Copy the file at path1 to path2"""
        container1, path1 = self.split_path(path1, delimiter="/")
        container2, path2 = self.split_path(path2, delimiter="/")

        cc1 = self.service_client.get_container_client(container1)
        blobclient1 = cc1.get_blob_client(blob=path1)
        if container1 == container2:
            blobclient2 = cc1.get_blob_client(blob=path2)
        else:
            cc2 = self.service_client.get_container_client(container2)
            blobclient2 = cc2.get_blob_client(blob=path2)
        await blobclient2.start_copy_from_url(blobclient1.url)
        self.invalidate_cache(container1)
        self.invalidate_cache(container2)

    cp_file = sync_wrapper(_cp_file)

    def upload(self, lpath, rpath, recursive=False, **kwargs):
        """Alias of :ref:`FilesystemSpec.put`."""
        return self.put(lpath, rpath, recursive=recursive, **kwargs)

    def download(self, rpath, lpath, recursive=False, **kwargs):
        """Alias of :ref:`FilesystemSpec.get`."""
        return self.get(rpath, lpath, recursive=recursive, **kwargs)

    async def _get_file(
        self, rpath, lpath, recursive=False, delimiter="/", callback=None, **kwargs
    ):
        """Copy single file remote to local"""
        if os.path.isdir(lpath):
            return
        container_name, path = self.split_path(rpath, delimiter=delimiter)
        try:
            async with self.service_client.get_blob_client(
                container_name, path.rstrip(delimiter)
            ) as bc:
                with open(lpath, "wb") as my_blob:
                    stream = await bc.download_blob(
                        raw_response_hook=make_callback(
                            "download_stream_current", callback
                        )
                    )
                    await stream.readinto(my_blob)
        except ResourceNotFoundError as exception:
            raise FileNotFoundError from exception

    get_file = sync_wrapper(_get_file)

    def getxattr(self, path, attr):
        meta = self.info(path).get("metadata", {})
        return meta[attr]

    async def _setxattrs(self, rpath, **kwargs):
        container_name, path = self.split_path(rpath)
        try:
            async with self.service_client.get_blob_client(container_name, path) as bc:
                await bc.set_blob_metadata(metadata=kwargs)
            self.invalidate_cache(self._parent(rpath))
        except Exception as e:
            raise FileNotFoundError(f"File not found for {e}")

    setxattrs = sync_wrapper(_setxattrs)

    def invalidate_cache(self, path=None):
        if path is None:
            self.dircache.clear()
        else:
            self.dircache.pop(path, None)
        super(AzureBlobFileSystem, self).invalidate_cache(path)
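    # Illustrative sketch: the single-file transfer and metadata helpers wrapped
    # above. The local and remote paths are placeholders.
    #
    #   fs.put_file("local.parquet", "mycontainer/remote.parquet", overwrite=True)
    #   fs.get_file("mycontainer/remote.parquet", "copy.parquet")
    #   fs.setxattrs("mycontainer/remote.parquet", owner="etl")  # sets blob metadata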
    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size: int = None,
        autocommit: bool = True,
        cache_options: dict = {},
        cache_type="readahead",
        metadata=None,
        **kwargs,
    ):
        """Open a file on the datalake, or a block blob

        Parameters
        ----------
        path: str
            Path to file to open

        mode: str
            What mode to open the file in - defaults to "rb"

        block_size: int
            Size per block for multi-part downloads.

        autocommit: bool
            Whether or not to write to the destination directly

        cache_type: str
            One of "readahead", "none", "mmap", "bytes", defaults to "readahead"
            Caching policy in read mode.
            See the definitions here:
            https://filesystem-spec.readthedocs.io/en/latest/api.html#readbuffering
        """
        logger.debug(f"_open: {path}")
        return AzureBlobFile(
            fs=self,
            path=path,
            mode=mode,
            block_size=block_size,
            autocommit=autocommit,
            cache_options=cache_options,
            cache_type=cache_type,
            metadata=metadata,
            **kwargs,
        )


class AzureBlobFile(AbstractBufferedFile):
    """File-like operations on Azure Blobs"""

    DEFAULT_BLOCK_SIZE = 5 * 2**20

    def __init__(
        self,
        fs: AzureBlobFileSystem,
        path: str,
        mode: str = "rb",
        block_size="default",
        autocommit: bool = True,
        cache_type: str = "bytes",
        cache_options: dict = {},
        metadata=None,
        **kwargs,
    ):
        """
        Represents a file on AzureBlobStorage that implements buffered reading and writing

        Parameters
        ----------
        fs: AzureBlobFileSystem
            An instance of the filesystem

        path: str
            The location of the file on the filesystem

        mode: str
            What mode to open the file in. Defaults to "rb"

        block_size: int, str
            Buffer size for reading and writing. The string "default" will use the class default

        autocommit: bool
            Whether or not to write to the destination directly

        cache_type: str
            One of "readahead", "none", "mmap", "bytes", defaults to "readahead"
            Caching policy in read mode. See the definitions in ``core``.

        cache_options : dict
            Additional options passed to the constructor for the cache specified
            by `cache_type`.

        kwargs: dict
            Passed to AbstractBufferedFile
        """
        from fsspec.core import caches

        container_name, blob = fs.split_path(path)
        self.fs = fs
        self.path = path
        self.mode = mode
        self.container_name = container_name
        self.blob = blob
        self.block_size = block_size

        try:
            # Need to confirm there is an event loop running in
            # the thread. If not, create the fsspec loop
            # and set it. This is to handle issues with
            # Async Credentials from the Azure SDK
            loop = get_running_loop()
        except RuntimeError:
            loop = get_loop()
            asyncio.set_event_loop(loop)

        self.loop = self.fs.loop or get_loop()
        self.container_client = (
            fs.service_client.get_container_client(self.container_name)
            or self.connect_client()
        )
        self.blocksize = (
            self.DEFAULT_BLOCK_SIZE if block_size in ["default", None] else block_size
        )
        self.loc = 0
        self.autocommit = autocommit
        self.end = None
        self.start = None
        self.closed = False

        if cache_options is None:
            cache_options = {}

        if "trim" in kwargs:
            warnings.warn(
                "Passing 'trim' to control the cache behavior has been deprecated. "
                "Specify it within the 'cache_options' argument instead.",
                FutureWarning,
            )
            cache_options["trim"] = kwargs.pop("trim")

        self.metadata = None
        self.kwargs = kwargs

        if self.mode not in {"ab", "rb", "wb"}:
            raise NotImplementedError("File mode not supported")
        if self.mode == "rb":
            if not hasattr(self, "details"):
                self.details = self.fs.info(self.path)
            self.size = self.details["size"]
            self.cache = caches[cache_type](
                blocksize=self.blocksize,
                fetcher=self._fetch_range,
                size=self.size,
                **cache_options,
            )
            self.metadata = sync(
                self.loop, get_blob_metadata, self.container_client, self.blob
            )
        else:
            self.metadata = metadata or {"is_directory": "false"}
            self.buffer = io.BytesIO()
            self.offset = None
            self.forced = False
            self.location = None

    def close(self):
        """Close file and azure client."""
        asyncio.run_coroutine_threadsafe(close_container_client(self), loop=self.loop)
        super().close()
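    # Illustrative sketch: AzureBlobFile is normally obtained through the
    # filesystem's ``open()`` rather than constructed directly. Paths are
    # placeholders; "rb", "wb" and "ab" are the only supported modes.
    #
    #   with fs.open("mycontainer/big.bin", mode="rb", block_size=2**22) as f:
    #       header = f.read(1024)  # reads are served through the block cache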
    def connect_client(self):
        """Connect to the Asynchronous BlobServiceClient, using user-specified
        connection details.
        Tries credentials first, then connection string and finally account key

        Raises
        ------
        ValueError if none of the connection details are available
        """
        try:
            self.fs.account_url: str = (
                f"https://{self.fs.account_name}.blob.core.windows.net"
            )
            creds = [self.fs.sync_credential, self.fs.account_key, self.fs.credential]
            if any(creds):
                self.container_client = [
                    AIOBlobServiceClient(
                        account_url=self.fs.account_url,
                        credential=cred,
                        _location_mode=self.fs.location_mode,
                    ).get_container_client(self.container_name)
                    for cred in creds
                    if cred is not None
                ][0]
            elif self.fs.connection_string is not None:
                self.container_client = AIOBlobServiceClient.from_connection_string(
                    conn_str=self.fs.connection_string
                ).get_container_client(self.container_name)
            elif self.fs.sas_token is not None:
                self.container_client = AIOBlobServiceClient(
                    account_url=self.fs.account_url + self.fs.sas_token, credential=None
                ).get_container_client(self.container_name)
            else:
                self.container_client = AIOBlobServiceClient(
                    account_url=self.fs.account_url
                ).get_container_client(self.container_name)
        except Exception as e:
            raise ValueError(
                f"Unable to fetch container_client with provided params for {e}!!"
            )

    async def _async_fetch_range(self, start: int, end: int = None, **kwargs):
        """
        Download a chunk of data specified by start and end

        Parameters
        ----------
        start: int
            Start byte position to download blob from
        end: int
            End of the file chunk to download
        """
        if end and (end > self.size):
            length = self.size - start
        else:
            length = None if end is None else (end - start)
        async with self.container_client:
            stream = await self.container_client.download_blob(
                blob=self.blob, offset=start, length=length
            )
            blob = await stream.readall()
        return blob

    _fetch_range = sync_wrapper(_async_fetch_range)

    async def _reinitiate_async_upload(self, **kwargs):
        pass

    async def _async_initiate_upload(self, **kwargs):
        """Prepare a remote file upload"""
        self._block_list = []
        if self.mode == "wb":
            try:
                await self.container_client.delete_blob(self.blob)
            except ResourceNotFoundError:
                pass
            except HttpResponseError:
                pass
            else:
                await self._reinitiate_async_upload()
        elif self.mode == "ab":
            if not await self.fs._exists(self.path):
                async with self.container_client.get_blob_client(blob=self.blob) as bc:
                    await bc.create_append_blob(metadata=self.metadata)
        else:
            raise ValueError(
                "File operation modes other than wb are not yet supported for writing"
            )

    _initiate_upload = sync_wrapper(_async_initiate_upload)
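    # Illustrative sketch: the write path. "wb" stages blocks and commits a
    # block list when the file closes; "ab" creates an append blob if needed and
    # appends on each flush. The path is a placeholder.
    #
    #   with fs.open("mycontainer/out.log", mode="ab") as f:
    #       f.write(b"appended line\n")  # becomes an AppendBlob upload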
if block_id == "0000000" and length == 0 and final: async with self.container_client.get_blob_client( blob=self.blob ) as bc: await bc.upload_blob(data=data, metadata=self.metadata) elif length == 0 and final: # just finalize block_list = [BlobBlock(_id) for _id in self._block_list] async with self.container_client.get_blob_client( blob=self.blob ) as bc: await bc.commit_block_list( block_list=block_list, metadata=self.metadata ) else: raise RuntimeError(f"Failed to upload block{e}!") from e elif self.mode == "ab": async with self.container_client.get_blob_client(blob=self.blob) as bc: await bc.upload_blob( data=data, length=length, blob_type=BlobType.AppendBlob, metadata=self.metadata, ) else: raise ValueError( "File operation modes other than wb or ab are not yet supported for upload_chunk" ) _upload_chunk = sync_wrapper(_async_upload_chunk) def __del__(self): try: if not self.closed: self.close() except TypeError: pass
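# Minimal smoke test (illustrative only): lists the containers in a storage
# account. The credentials below are placeholders and must be replaced with
# real values before running.
if __name__ == "__main__":
    fs = AzureBlobFileSystem(
        account_name="myaccount",
        account_key="<storage-account-key>",
    )
    print(fs.ls(""))  # a top-level listing returns the container names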
""" path = await self._expand_path( path, recursive=recursive, maxdepth=maxdepth, with_parent=True ) for p in reversed(path): await self._rm_file(p) self.invalidate_cache() rm = sync_wrapper(_rm) async def _rm_file(self, path, delimiter="/", **kwargs): """ Delete a given file Parameters ---------- path: str Path to file to delete delimiter: str Delimiter to use when splitting the path """ try: kind = await self._info(path) container_name, path = self.split_path(path, delimiter=delimiter) kind = kind["type"] if path != "": async with self.service_client.get_container_client( container=container_name ) as cc: await cc.delete_blob(path.rstrip(delimiter)) elif kind == "directory": await self._rmdir(container_name) else: raise RuntimeError(f"Unable to remove {path}") except ResourceNotFoundError: pass except FileNotFoundError: pass except Exception as e: raise RuntimeError(f"Failed to remove {path} for {e}") self.invalidate_cache(self._parent(path)) sync_wrapper(_rm_file) def rmdir(self, path: str, delimiter="/", **kwargs): sync(self.loop, self._rmdir, path, delimiter=delimiter, **kwargs) async def _rmdir(self, path: str, delimiter="/", **kwargs): """ Remove a directory, if empty Parameters ---------- path: str Path of directory to remove delimiter: str Delimiter to use when splitting the path """ container_name, path = self.split_path(path, delimiter=delimiter) container_exists = await self._container_exists(container_name) if container_exists and not path: await self.service_client.delete_container(container_name) self.invalidate_cache(_ROOT_PATH) def size(self, path): return sync(self.loop, self._size, path) async def _size(self, path): """Size in bytes of file""" res = await self._info(path) size = res.get("size", None) return size def isfile(self, path): return sync(self.loop, self._isfile, path) async def _isfile(self, path): """Is this entry file-like?""" try: path_ = path.split("/")[:-1] path_ = "/".join([p for p in path_]) if self.dircache[path_]: for fp in self.dircache[path_]: if fp["name"] == path and fp["type"] == "file": return True except KeyError: pass except FileNotFoundError: pass try: container_name, path = self.split_path(path) if not path: # A container can not be a file return False else: try: async with self.service_client.get_blob_client( container_name, path ) as bc: props = await bc.get_blob_properties() if props["metadata"]["is_directory"] == "false": return True except ResourceNotFoundError: return False except KeyError: details = await self._details([props]) return details[0]["type"] == "file" except: # noqa: E722 return False def isdir(self, path): return sync(self.loop, self._isdir, path) async def _isdir(self, path): """Is this entry directory-like?""" if path in self.dircache: for fp in self.dircache[path]: # Files will contain themselves in the cache, but # a directory can not contain itself if fp["name"] != path: return True try: container_name, path_ = self.split_path(path) if not path_: return await self._container_exists(container_name) else: if await self._exists(path) and not await self._isfile(path): return True else: return False except IOError: return False def exists(self, path): return sync(self.loop, self._exists, path) async def _exists(self, path): """Is there a file at the given path""" try: if self._ls_from_cache(path): return True except FileNotFoundError: pass except KeyError: pass container_name, path = self.split_path(path) if not path: if container_name: return await self._container_exists(container_name) else: # Empty paths exist by 
definition return True async with self.service_client.get_blob_client(container_name, path) as bc: if await bc.exists(): return True dir_path = path.rstrip("/") + "/" async with self.service_client.get_container_client( container=container_name ) as container_client: async for blob in container_client.list_blobs( results_per_page=1, name_starts_with=dir_path ): return True else: return False async def _pipe_file(self, path, value, overwrite=True, **kwargs): """Set the bytes of given file""" container_name, path = self.split_path(path) async with self.service_client.get_blob_client( container=container_name, blob=path ) as bc: result = await bc.upload_blob( data=value, overwrite=overwrite, metadata={"is_directory": "false"} ) self.invalidate_cache(self._parent(path)) return result pipe_file = sync_wrapper(_pipe_file) async def _cat_file(self, path, start=None, end=None, **kwargs): path = self._strip_protocol(path) if end is not None: start = start or 0 # download_blob requires start if length is provided. length = end - start else: length = None container_name, path = self.split_path(path) async with self.service_client.get_blob_client( container=container_name, blob=path ) as bc: try: stream = await bc.download_blob(offset=start, length=length) except ResourceNotFoundError as e: raise FileNotFoundError from e result = await stream.readall() return result def cat(self, path, recursive=False, on_error="raise", **kwargs): """Fetch (potentially multiple) paths' contents Returns a dict of {path: contents} if there are multiple paths or the path has been otherwise expanded on_error : "raise", "omit", "return" If raise, an underlying exception will be raised (converted to KeyError if the type is in self.missing_exceptions); if omit, keys with exception will simply not be included in the output; if "return", all keys are included in the output, but the value will be bytes or an exception instance. """ paths = self.expand_path(path, recursive=recursive) if ( len(paths) > 1 or isinstance(path, list) or paths[0] != self._strip_protocol(path) ): out = {} for path in paths: try: out[path] = self.cat_file(path, **kwargs) except Exception as e: if on_error == "raise": raise if on_error == "return": out[path] = e return out else: return self.cat_file(paths[0]) def url(self, path, expires=3600, **kwargs): return sync(self.loop, self._url, path, expires, **kwargs) async def _url(self, path, expires=3600, **kwargs): """Generate presigned URL to access path by HTTP Parameters ---------- path : string the key path we are interested in expires : int the number of seconds this signature will be good for. 
""" container_name, blob = self.split_path(path) sas_token = generate_blob_sas( account_name=self.account_name, container_name=container_name, blob_name=blob, account_key=self.account_key, permission=BlobSasPermissions(read=True), expiry=datetime.utcnow() + timedelta(seconds=expires), ) async with self.service_client.get_blob_client(container_name, blob) as bc: url = f"{bc.url}?{sas_token}" return url def expand_path(self, path, recursive=False, maxdepth=None): return sync(self.loop, self._expand_path, path, recursive, maxdepth) async def _expand_path(self, path, recursive=False, maxdepth=None, **kwargs): """Turn one or more globs or directories into a list of all matching files""" with_parent = kwargs.get( "with_parent", False ) # Sets whether to return the parent dir if isinstance(path, list): path = [f"{p.strip('/')}" for p in path if not p.endswith("*")] else: if not path.endswith("*"): path = f"{path.strip('/')}" if isinstance(path, str): out = await self._expand_path( [path], recursive, maxdepth, with_parent=with_parent ) else: out = set() path = [self._strip_protocol(p) for p in path] for p in path: if has_magic(p): bit = set(await self._glob(p)) out |= bit if recursive: bit2 = set(await self._expand_path(p)) out |= bit2 continue elif recursive: rec = set( await self._find(p, withdirs=True, with_parent=with_parent) ) out |= rec if p not in out and ( recursive is False or await self._exists(p) or await self._exists(p.rstrip("/")) ): if not await self._exists(p): # This is to verify that we don't miss files p = p.rstrip("/") if not await self._exists(p): continue out.add(p) if not out: raise FileNotFoundError return list(sorted(out)) async def _put_file( self, lpath, rpath, delimiter="/", overwrite=False, callback=None, **kwargws ): """ Copy single file to remote :param lpath: Path to local file :param rpath: Path to remote file :param delimitier: Filepath delimiter :param overwrite: Boolean (False). 
If True, overwrite the existing file present """ container_name, path = self.split_path(rpath, delimiter=delimiter) if os.path.isdir(lpath): self.makedirs(rpath, exist_ok=True) else: try: with open(lpath, "rb") as f1: async with self.service_client.get_blob_client( container_name, path ) as bc: await bc.upload_blob( f1, overwrite=overwrite, metadata={"is_directory": "false"}, raw_response_hook=make_callback( "upload_stream_current", callback ), ) self.invalidate_cache() except ResourceExistsError: raise FileExistsError("File already exists!") except ResourceNotFoundError: if not await self._exists(container_name): raise FileNotFoundError("Container does not exist.") await self._put_file(lpath, rpath, delimiter, overwrite) self.invalidate_cache() put_file = sync_wrapper(_put_file) async def _cp_file(self, path1, path2, **kwargs): """Copy the file at path1 to path2""" container1, path1 = self.split_path(path1, delimiter="/") container2, path2 = self.split_path(path2, delimiter="/") cc1 = self.service_client.get_container_client(container1) blobclient1 = cc1.get_blob_client(blob=path1) if container1 == container2: blobclient2 = cc1.get_blob_client(blob=path2) else: cc2 = self.service_client.get_container_client(container2) blobclient2 = cc2.get_blob_client(blob=path2) await blobclient2.start_copy_from_url(blobclient1.url) self.invalidate_cache(container1) self.invalidate_cache(container2) cp_file = sync_wrapper(_cp_file) def upload(self, lpath, rpath, recursive=False, **kwargs): """Alias of :ref:`FilesystemSpec.put`.""" return self.put(lpath, rpath, recursive=recursive, **kwargs) def download(self, rpath, lpath, recursive=False, **kwargs): """Alias of :ref:`FilesystemSpec.get`.""" return self.get(rpath, lpath, recursive=recursive, **kwargs) async def _get_file( self, rpath, lpath, recursive=False, delimiter="/", callback=None, **kwargs ): """Copy single file remote to local""" if os.path.isdir(lpath): return container_name, path = self.split_path(rpath, delimiter=delimiter) try: async with self.service_client.get_blob_client( container_name, path.rstrip(delimiter) ) as bc: with open(lpath, "wb") as my_blob: stream = await bc.download_blob( raw_response_hook=make_callback( "download_stream_current", callback ) ) await stream.readinto(my_blob) except ResourceNotFoundError as exception: raise FileNotFoundError from exception get_file = sync_wrapper(_get_file) def getxattr(self, path, attr): meta = self.info(path).get("metadata", {}) return meta[attr] async def _setxattrs(self, rpath, **kwargs): container_name, path = self.split_path(rpath) try: async with self.service_client.get_blob_client(container_name, path) as bc: await bc.set_blob_metadata(metadata=kwargs) self.invalidate_cache(self._parent(rpath)) except Exception as e: raise FileNotFoundError(f"File not found for {e}") setxattrs = sync_wrapper(_setxattrs) def invalidate_cache(self, path=None): if path is None: self.dircache.clear() else: self.dircache.pop(path, None) super(AzureBlobFileSystem, self).invalidate_cache(path) def _open( self, path: str, mode: str = "rb", block_size: int = None, autocommit: bool = True, cache_options: dict = {}, cache_type="readahead", metadata=None, **kwargs, ): """Open a file on the datalake, or a block blob Parameters ---------- path: str Path to file to open mode: str What mode to open the file in - defaults to "rb" block_size: int Size per block for multi-part downloads. 
autocommit: bool Whether or not to write to the destination directly cache_type: str One of "readahead", "none", "mmap", "bytes", defaults to "readahead" Caching policy in read mode. See the definitions here: https://filesystem-spec.readthedocs.io/en/latest/api.html#readbuffering """ logger.debug(f"_open: {path}") return AzureBlobFile( fs=self, path=path, mode=mode, block_size=block_size, autocommit=autocommit, cache_options=cache_options, cache_type=cache_type, metadata=metadata, **kwargs, ) class AzureBlobFile(AbstractBufferedFile): """File-like operations on Azure Blobs""" DEFAULT_BLOCK_SIZE = 5 * 2**20 def __init__( self, fs: AzureBlobFileSystem, path: str, mode: str = "rb", block_size="default", autocommit: bool = True, cache_type: str = "bytes", cache_options: dict = {}, metadata=None, **kwargs, ): """ Represents a file on AzureBlobStorage that implements buffered reading and writing Parameters ---------- fs: AzureBlobFileSystem An instance of the filesystem path: str The location of the file on the filesystem mode: str What mode to open the file in. Defaults to "rb" block_size: int, str Buffer size for reading and writing. The string "default" will use the class default autocommit: bool Whether or not to write to the destination directly cache_type: str One of "readahead", "none", "mmap", "bytes", defaults to "readahead" Caching policy in read mode. See the definitions in ``core``. cache_options : dict Additional options passed to the constructor for the cache specified by `cache_type`. kwargs: dict Passed to AbstractBufferedFile """ from fsspec.core import caches container_name, blob = fs.split_path(path) self.fs = fs self.path = path self.mode = mode self.container_name = container_name self.blob = blob self.block_size = block_size try: # Need to confirm there is an event loop running in # the thread. If not, create the fsspec loop # and set it. This is to handle issues with # Async Credentials from the Azure SDK loop = get_running_loop() except RuntimeError: loop = get_loop() asyncio.set_event_loop(loop) self.loop = self.fs.loop or get_loop() self.container_client = ( fs.service_client.get_container_client(self.container_name) or self.connect_client() ) self.blocksize = ( self.DEFAULT_BLOCK_SIZE if block_size in ["default", None] else block_size ) self.loc = 0 self.autocommit = autocommit self.end = None self.start = None self.closed = False if cache_options is None: cache_options = {} if "trim" in kwargs: warnings.warn( "Passing 'trim' to control the cache behavior has been deprecated. " "Specify it within the 'cache_options' argument instead.", FutureWarning, ) cache_options["trim"] = kwargs.pop("trim") self.metadata = None self.kwargs = kwargs if self.mode not in {"ab", "rb", "wb"}: raise NotImplementedError("File mode not supported") if self.mode == "rb": if not hasattr(self, "details"): self.details = self.fs.info(self.path) self.size = self.details["size"] self.cache = caches[cache_type]( blocksize=self.blocksize, fetcher=self._fetch_range, size=self.size, **cache_options, ) self.metadata = sync( self.loop, get_blob_metadata, self.container_client, self.blob ) else: self.metadata = metadata or {"is_directory": "false"} self.buffer = io.BytesIO() self.offset = None self.forced = False self.location = None def close(self): """Close file and azure client.""" asyncio.run_coroutine_threadsafe(close_container_client(self), loop=self.loop) super().close() def connect_client(self): """Connect to the Asynchronous BlobServiceClient, using user-specified connection details. 
Tries credentials first, then connection string and finally account key Raises ------ ValueError if none of the connection details are available """ try: self.fs.account_url: str = ( f"https://{self.fs.account_name}.blob.core.windows.net" ) creds = [self.fs.sync_credential, self.fs.account_key, self.fs.credential] if any(creds): self.container_client = [ AIOBlobServiceClient( account_url=self.fs.account_url, credential=cred, _location_mode=self.fs.location_mode, ).get_container_client(self.container_name) for cred in creds if cred is not None ][0] elif self.fs.connection_string is not None: self.container_client = AIOBlobServiceClient.from_connection_string( conn_str=self.fs.connection_string ).get_container_client(self.container_name) elif self.fs.sas_token is not None: self.container_client = AIOBlobServiceClient( account_url=self.fs.account_url + self.fs.sas_token, credential=None ).get_container_client(self.container_name) else: self.container_client = AIOBlobServiceClient( account_url=self.fs.account_url ).get_container_client(self.container_name) except Exception as e: raise ValueError( f"Unable to fetch container_client with provided params for {e}!!" ) async def _async_fetch_range(self, start: int, end: int = None, **kwargs): """ Download a chunk of data specified by start and end Parameters ---------- start: int Start byte position to download blob from end: int End of the file chunk to download """ if end and (end > self.size): length = self.size - start else: length = None if end is None else (end - start) async with self.container_client: stream = await self.container_client.download_blob( blob=self.blob, offset=start, length=length ) blob = await stream.readall() return blob _fetch_range = sync_wrapper(_async_fetch_range) async def _reinitiate_async_upload(self, **kwargs): pass async def _async_initiate_upload(self, **kwargs): """Prepare a remote file upload""" self._block_list = [] if self.mode == "wb": try: await self.container_client.delete_blob(self.blob) except ResourceNotFoundError: pass except HttpResponseError: pass else: await self._reinitiate_async_upload() elif self.mode == "ab": if not await self.fs._exists(self.path): async with self.container_client.get_blob_client(blob=self.blob) as bc: await bc.create_append_blob(metadata=self.metadata) else: raise ValueError( "File operation modes other than wb are not yet supported for writing" ) _initiate_upload = sync_wrapper(_async_initiate_upload) async def _async_upload_chunk(self, final: bool = False, **kwargs): """ Write one part of a multi-block file upload Parameters ---------- final: bool This is the last block, so should complete file, if self.autocommit is True. """ data = self.buffer.getvalue() length = len(data) block_id = len(self._block_list) block_id = f"{block_id:07d}" if self.mode == "wb": try: async with self.container_client.get_blob_client(blob=self.blob) as bc: await bc.stage_block( block_id=block_id, data=data, length=length, ) self._block_list.append(block_id) if final: block_list = [BlobBlock(_id) for _id in self._block_list] async with self.container_client.get_blob_client( blob=self.blob ) as bc: await bc.commit_block_list( block_list=block_list, metadata=self.metadata ) except Exception as e: # This step handles the situation where data="" and length=0 # which is throws an InvalidHeader error from Azure, so instead # of staging a block, we directly upload the empty blob # This isn't actually tested, since Azureite behaves differently. 
if block_id == "0000000" and length == 0 and final: async with self.container_client.get_blob_client( blob=self.blob ) as bc: await bc.upload_blob(data=data, metadata=self.metadata) elif length == 0 and final: # just finalize block_list = [BlobBlock(_id) for _id in self._block_list] async with self.container_client.get_blob_client( blob=self.blob ) as bc: await bc.commit_block_list( block_list=block_list, metadata=self.metadata ) else: raise RuntimeError(f"Failed to upload block{e}!") from e elif self.mode == "ab": async with self.container_client.get_blob_client(blob=self.blob) as bc: await bc.upload_blob( data=data, length=length, blob_type=BlobType.AppendBlob, metadata=self.metadata, ) else: raise ValueError( "File operation modes other than wb or ab are not yet supported for upload_chunk" ) _upload_chunk = sync_wrapper(_async_upload_chunk) def __del__(self): try: if not self.closed: self.close() except TypeError: pass
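# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the library): exercises the
# filesystem and file classes defined above. "myaccount", "mykey" and
# "mycontainer" are placeholder values and this assumes the account and
# container already exist; fs.open() is inherited from fsspec's
# AbstractFileSystem and routes through the _open() method above, returning
# an AzureBlobFile that buffers writes and commits a block list on close.
# ---------------------------------------------------------------------------
fs = AzureBlobFileSystem(account_name="myaccount", account_key="mykey")
with fs.open("mycontainer/hello.txt", "wb") as f:
    f.write(b"hello world")  # buffered in memory, uploaded on flush/close
with fs.open("mycontainer/hello.txt", "rb") as f:
    assert f.read() == b"hello world"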
import sys
import subprocess
import string

### CONSTANTS ###

DIGITS = '0123456789'
LETTERS = string.ascii_letters
LETTERS_DIGITS = LETTERS + DIGITS

### Super String ###

def super_string(text, pos_start, pos_end):
    result = ''

    # Calculate indices
    idx_start = max(text.rfind('\n', 0, pos_start.index), 0)
    idx_end = text.find('\n', idx_start + 1)
    if idx_end < 0:
        idx_end = len(text)

    # Generate lines
    line_count = pos_end.linumber - pos_start.linumber + 1
    for i in range(line_count):
        line = text[idx_start:idx_end]
        col_start = pos_start.conumber if i == 0 else 0
        col_end = pos_end.conumber if i == line_count - 1 else len(line) - 1

        result += line.replace('\n', '') + '\n'
        result += ' ' * col_start + '^' * (col_end - col_start)

        idx_start = idx_end
        idx_end = text.find('\n', idx_start + 1)
        if idx_end < 0:
            idx_end = len(text)

    return result.replace('\t', '')

### ERRORS ###

class Error:
    def __init__(self, pos_start, pos_end, error_name, details):
        self.pos_start = pos_start
        self.pos_end = pos_end
        self.error_name = error_name
        self.details = details

    def as_string(self):
        result = f'{self.error_name}: {self.details}\n'
        result += f'File {self.pos_start.fname}, line {self.pos_start.linumber + 1}'
        result += '\n\n' + super_string(self.pos_start.ftext, self.pos_start, self.pos_end)
        return result

class CompileTimeWarnning(Error):
    def __init__(self, pos_start, pos_end, details):
        super().__init__(pos_start, pos_end, "CompileTimeWarning", details)

class IllegalCharError(Error):
    def __init__(self, pos_start, pos_end, details):
        super().__init__(pos_start, pos_end, "IllegalCharError", details)

class ExpectedCharError(Error):
    def __init__(self, pos_start, pos_end, details):
        super().__init__(pos_start, pos_end, 'ExpectedCharError', details)

class InvalidSyntaxError(Error):
    def __init__(self, pos_start, pos_end, details):
        super().__init__(pos_start, pos_end, 'InvalidSyntaxError', details)

class NameError_(Error):
    def __init__(self, pos_start, pos_end, details):
        super().__init__(pos_start, pos_end, 'NameError', details)

class TypeError_(Error):
    def __init__(self, pos_start, pos_end, details):
        super().__init__(pos_start, pos_end, 'TypeError', details)

### POSITION ###

class Position:
    def __init__(self, index, linumber, conumber, fname, ftext):
        self.index = index
        self.linumber = linumber
        self.conumber = conumber
        self.fname = fname
        self.ftext = ftext

    def increment(self, current_char=None):
        self.index += 1
        self.conumber += 1
        if current_char == '\n':
            self.linumber += 1
            self.conumber = 0
        return self

    def copy(self):
        return Position(self.index, self.linumber, self.conumber, self.fname, self.ftext)

### TOKENS ###

TT_INT = 'INT'
TT_FLOAT = 'FLOAT'
TT_STRING = 'STRING'
TT_IDENTIFIER = 'IDENTIFIER'
TT_KEYWORD = 'KEYWORD'
TT_PLUS = 'PLUS'
TT_MINUS = 'MINUS'
TT_MUL = 'MUL'
TT_DIV = 'DIV'
TT_POW = 'POW'
TT_EQ = 'EQ'
TT_LPAREN = 'LPAREN'
TT_RPAREN = 'RPAREN'
TT_EE = 'EE'
TT_NE = 'NE'
TT_LT = 'LT'
TT_GT = 'GT'
TT_LTE = 'LTE'
TT_GTE = 'GTE'
TT_NEWLINE = 'NEWLINE'
TT_EOF = 'EOF'

KEYWORDS = [
    'static', 'and', 'or', 'not', 'True', 'False', 'if', 'elif', 'else',
    'for', 'to', 'step', 'while', 'end', 'choice', 'int', 'float', 'bool'
]

class Token:
    def __init__(self, type_, value=None, pos_start=None, pos_end=None):
        self.type = type_
        self.value = value
        if pos_start:
            self.pos_start = pos_start.copy()
            self.pos_end = pos_start.copy()
            self.pos_end.increment()
        if pos_end:
            self.pos_end = pos_end.copy()

    def __repr__(self):
        if self.value:
            return f"{self.type}:{self.value}"
        return f'{self.type}'

### LEXER ###

class Lexer:
    def __init__(self, fname, text):
self.text = text self.pos = Position(-1, 0, -1, fname, text) self.current_char = None self.increment() def increment(self): self.pos.increment(self.current_char) self.current_char = self.text[self.pos.index] if self.pos.index < len(self.text) else None def make_tokens(self): tokens = [] while self.current_char != None: if self.current_char in ' \t': self.increment() elif self.current_char in ';\n': tokens.append(self.make_newline()) elif self.current_char in DIGITS: tokens.append(self.define_number()) elif self.current_char in LETTERS: tokens.append(self.make_identifier()) elif self.current_char == '"': tok, error = self.make_string() if error: return [], error tokens.append(tok) elif self.current_char == '+': tokens.append(Token(TT_PLUS, pos_start=self.pos)) self.increment() elif self.current_char == '-': tokens.append(Token(TT_MINUS, pos_start=self.pos)) self.increment() elif self.current_char == '*': tokens.append(Token(TT_MUL, pos_start=self.pos)) self.increment() elif self.current_char == '/': tokens.append(Token(TT_DIV, pos_start=self.pos)) self.increment() elif self.current_char == '^': tokens.append(Token(TT_POW, pos_start=self.pos)) self.increment() elif self.current_char == '(': tokens.append(Token(TT_LPAREN, pos_start=self.pos)) self.increment() elif self.current_char == ')': tokens.append(Token(TT_RPAREN, pos_start=self.pos)) self.increment() elif self.current_char == '!': tok, error = self.make_not_equals() if error: return [], error tokens.append(tok) elif self.current_char == '=': tokens.append(self.make_equals()) elif self.current_char == '<': tokens.append(self.make_less_than()) elif self.current_char == '>': tokens.append(self.make_greater_than()) else: pos_start = self.pos.copy() char = self.current_char self.increment() return [], IllegalCharError(pos_start, self.pos, "'" + char + "'") tokens.append(Token(TT_EOF, pos_start=self.pos)) return tokens, None def make_newline(self): pos_start = self.pos.copy() while self.current_char != None and self.current_char in ';\n': self.increment() return Token(TT_NEWLINE, pos_start=pos_start, pos_end=self.pos) def define_number(self): num_str = '' period_count = 0 pos_start = self.pos.copy() while self.current_char != None and self.current_char in DIGITS + '.': if self.current_char == '.': if period_count == 1: break period_count += 1 num_str += '.' 
else: num_str += self.current_char self.increment() if period_count == 0: return Token(TT_INT, int(num_str), pos_start, self.pos) else: return Token(TT_FLOAT, float(num_str), pos_start, self.pos) def make_string(self): string = '' pos_start = self.pos.copy() self.increment() while self.current_char != None and self.current_char != '"': if self.current_char in '\n': return None, ExpectedCharError(pos_start, self.pos, "Expected ' \" ' ending") string += self.current_char self.increment() self.increment() return Token(TT_STRING, string, pos_start, self.pos), None def make_identifier(self): id_str = '' pos_start = self.pos.copy() while self.current_char != None and self.current_char in LETTERS_DIGITS + '_': id_str += self.current_char self.increment() tok_type = TT_KEYWORD if id_str in KEYWORDS else TT_IDENTIFIER return Token(tok_type, id_str, pos_start, self.pos) def make_not_equals(self): pos_start = self.pos.copy() self.increment() if self.current_char == '=': self.increment() return Token(TT_NE, pos_start=pos_start, pos_end=self.pos), None self.increment() return None, ExpectedCharError(pos_start, self.pos, "Expected '=' (after '!')") def make_equals(self): tok_type = TT_EQ pos_start = self.pos.copy() self.increment() if self.current_char == '=': self.increment() tok_type = TT_EE return Token(tok_type, pos_start=pos_start, pos_end=self.pos) def make_less_than(self): tok_type = TT_LT pos_start = self.pos.copy() self.increment() if self.current_char == '=': self.increment() tok_type = TT_LTE return Token(tok_type, pos_start=pos_start, pos_end=self.pos) def make_greater_than(self): tok_type = TT_GT pos_start = self.pos.copy() self.increment() if self.current_char == '=': self.increment() tok_type = TT_GTE return Token(tok_type, pos_start=pos_start, pos_end=self.pos) ### NODES ### class NumberNode: def __init__(self, token): self.token = token self.pos_start = self.token.pos_start self.pos_end = self.token.pos_end def __repr__(self): return f"{self.token.value}" class StringNode: def __init__(self, token): self.token = token self.pos_start = self.token.pos_start self.pos_end = self.token.pos_end def __repr__(self): return f'"{self.token.value}"' class KeywordValueNode: def __init__(self, token): self.token = token self.pos_start = self.token.pos_start self.pos_end = self.token.pos_end def __repr__(self): return f"{self.token.value}" class IdentifierNode: def __init__(self, id_name_token): self.id_name_token = id_name_token self.pos_start = self.id_name_token.pos_start self.pos_end = self.id_name_token.pos_end def __repr__(self): return f"{self.id_name_token.value}" class IdAssignNode: def __init__(self, id_name_token, value_node, manual_static, manual_static_token): self.id_name_token = id_name_token self.value_node = value_node self.manual_static = manual_static self.manual_static_token = manual_static_token self.pos_start = self.id_name_token.pos_start self.pos_end = self.value_node.pos_end def __repr__(self): return f"IdentifierAssign({self.id_name_token.value}, {self.value_node})" class IfNode: def __init__(self, if_value, elif_values, else_value, pos_start, pos_end): self.if_value = if_value self.elif_values = elif_values self.else_value = else_value self.pos_start = pos_start self.pos_end = pos_end def __repr__(self): return f"if({self.if_value}, {self.elif_values}, {self.else_value})" class TypeChoiceNode: def __init__(self, identifier, type_, expr): self.identifier = identifier self.type = type_ self.expr = expr self.pos_start = self.identifier.pos_start self.pos_end = 
self.expr.pos_end

    def __repr__(self):
        return f"TypeChoice({self.identifier}, {self.type}, {self.expr})"

class ForNode:
    def __init__(self, for_keyword_token, var_name_token, start_value_node, end_value_node, step_value_node, statements):
        self.for_keyword_token = for_keyword_token
        self.var_name_token = var_name_token
        self.start_value_node = start_value_node
        self.end_value_node = end_value_node
        self.step_value_node = step_value_node
        self.statements = statements
        self.pos_start = self.for_keyword_token.pos_start
        self.pos_end = self.step_value_node.pos_end if self.step_value_node else self.end_value_node.pos_end

    def __repr__(self):
        return f'for({self.var_name_token}={self.start_value_node, self.end_value_node}, {self.step_value_node}, {self.statements})'

class WhileNode:
    def __init__(self, while_keyword_token, condition_node, statements):
        self.while_keyword_token = while_keyword_token
        self.condition_node = condition_node
        self.statements = statements
        self.pos_start = self.while_keyword_token.pos_start
        self.pos_end = self.condition_node.pos_end

    def __repr__(self):
        return f'while({self.condition_node}, {self.statements})'

class BinOpNode:
    def __init__(self, left_node, op_token, right_node):
        self.left_node = left_node
        self.op_token = op_token
        self.right_node = right_node
        self.pos_start = self.left_node.pos_start
        self.pos_end = self.right_node.pos_end

    def __repr__(self):
        return f"({self.left_node}, {self.op_token}, {self.right_node})"

class UnaryOpNode:
    def __init__(self, op_token, node):
        self.op_token = op_token
        self.node = node
        self.pos_start = self.op_token.pos_start
        self.pos_end = self.node.pos_end

    def __repr__(self):
        return f"({self.op_token}, {self.node})"

class StatementNode:
    def __init__(self, statement_node):
        self.statement_node = statement_node
        self.pos_start = statement_node.pos_start
        self.pos_end = statement_node.pos_end

    def __repr__(self):
        return f'{self.statement_node}'

class CodeNode:
    def __init__(self, statements):
        self.statements = statements

    def __repr__(self):
        return f'{self.statements}'

### PARSE RESULT ###

class ParseResult:
    def __init__(self):
        self.error = None
        self.node = None

    def register(self, res):
        if isinstance(res, ParseResult):
            if res.error:
                self.error = res.error
            return res.node
        return res

    def success(self, node):
        self.node = node
        return self

    def failure(self, error):
        self.error = error
        return self

### PARSER ###

class Parser:
    def __init__(self, tokens):
        self.tokens = tokens
        self.token_index = -1
        self.increment()

    def increment(self):
        self.token_index += 1
        if self.token_index < len(self.tokens):
            self.current_token = self.tokens[self.token_index]
        return self.current_token

    def parser(self):
        res = self.code()
        return res

    def atom(self):
        res = ParseResult()
        tok = self.current_token

        if tok.type in (TT_INT, TT_FLOAT):
            res.register(self.increment())
            return res.success(NumberNode(tok))
        elif tok.type == TT_STRING:
            res.register(self.increment())
            return res.success(StringNode(tok))
        elif tok.type == TT_IDENTIFIER:
            res.register(self.increment())
            return res.success(IdentifierNode(tok))
        elif tok.type == TT_KEYWORD and tok.value == 'True':
            res.register(self.increment())
            return res.success(KeywordValueNode(tok))
        elif tok.type == TT_KEYWORD and tok.value == 'False':
            res.register(self.increment())
            return res.success(KeywordValueNode(tok))
        elif tok.type == TT_LPAREN:
            res.register(self.increment())
            expr = res.register(self.expr())
            if res.error:
                return res
            if self.current_token.type == TT_RPAREN:
                res.register(self.increment())
                return res.success(expr)
            else:
                return res.failure(InvalidSyntaxError(
self.current_token.pos_start, self.current_token.pos_end, "Expected ')'" )) return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected expression or '('" )) def power(self): return self.bin_op(self.atom, (TT_POW,), self.factor) def factor(self): res = ParseResult() tok = self.current_token if tok.type in (TT_PLUS, TT_MINUS): res.register(self.increment()) factor = res.register(self.factor()) if res.error: return res return res.success(UnaryOpNode(tok, factor)) return self.power() def term(self): return self.bin_op(self.factor, (TT_MUL, TT_DIV)) def arith_expr(self): return self.bin_op(self.term, (TT_PLUS, TT_MINUS)) def comp_expr(self): res = ParseResult() if self.current_token.type == TT_KEYWORD and self.current_token.value == 'not': op_token = self.current_token res.register(self.increment()) node = res.register(self.comp_expr()) if res.error: return res return res.success(UnaryOpNode(op_token, node)) node = res.register(self.bin_op(self.arith_expr, (TT_EE, TT_NE, TT_LT, TT_GT, TT_LTE, TT_GTE))) if res.error: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected expression, '(' or 'not'" )) return res.success(node) def identifier(self): res = ParseResult() manual_static = False manual_static_token = None if self.current_token.type == TT_KEYWORD and self.current_token.value == 'static': manual_static = True manual_static_token = self.current_token res.register(self.increment()) if self.current_token.type != TT_IDENTIFIER: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected valid identifier name" )) identifier_name = self.current_token res.register(self.increment()) if self.current_token.type != TT_EQ: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected '='" )) res.register(self.increment()) expr = res.register(self.expr()) if res.error: return res return res.success(IdAssignNode(identifier_name, expr, manual_static, manual_static_token)) def expr(self): res = ParseResult() if (self.current_token.type == TT_IDENTIFIER and self.tokens[self.token_index + 1].type == TT_EQ) or (self.current_token.type == TT_KEYWORD and self.current_token.value == 'static'): return res.success(res.register(self.identifier())) else: return self.bin_op(self.comp_expr, ((TT_KEYWORD, 'and'), (TT_KEYWORD, 'or'))) def if_expr(self): res = ParseResult() pos_start = self.current_token.pos_start if_token = self.current_token res.register(self.increment()) if_expr_value = [] if_expr_value.append(res.register(self.atom())) if res.error: return res if_expr_value.append(res.register( self.code_block(( (TT_KEYWORD, 'elif'), (TT_KEYWORD, 'else'), (TT_KEYWORD, 'end') ), if_token.pos_start, if_token.pos_end) )) if res.error: return res elif_expr_values = [] while self.current_token.type == TT_KEYWORD and self.current_token.value == 'elif': elif_expr_value = [] elif_token = self.current_token res.register(self.increment()) elif_expr_value.append(res.register(self.atom())) if res.error: return res elif_expr_value.append(res.register( self.code_block(( (TT_KEYWORD, 'elif'), (TT_KEYWORD, 'else'), (TT_KEYWORD, 'end') ), elif_token.pos_start, elif_token.pos_end) )) if res.error: return res elif_expr_values.append(elif_expr_value) else_expr_value = None if self.current_token.type == TT_KEYWORD and self.current_token.value == 'else': else_token = self.current_token res.register(self.increment()) else_expr_value = (res.register( 
self.code_block(( (TT_KEYWORD, 'end'), ), else_token.pos_start, else_token.pos_end) )) if res.error: return res pos_end = self.current_token.pos_end res.register(self.increment()) return res.success(IfNode(if_expr_value, elif_expr_values, else_expr_value, pos_start, pos_end)) def type_choice(self): res = ParseResult() res.register(self.increment()) if self.current_token.type != TT_IDENTIFIER: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected an identifier name" )) identifier = self.current_token res.register(self.increment()) if self.current_token.type != TT_LT: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected '<'" )) res.register(self.increment()) if self.current_token.type == TT_KEYWORD and self.current_token.value == 'int': type_ = self.current_token elif self.current_token.type == TT_KEYWORD and self.current_token.value == 'float': type_ = self.current_token elif self.current_token.type == TT_KEYWORD and self.current_token.value == 'bool': type_ = self.current_token else: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected 'int' or 'float'" )) res.register(self.increment()) if self.current_token.type != TT_GT: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected '>'" )) res.register(self.increment()) expr = res.register(self.statement()) if res.error: return res return res.success(TypeChoiceNode(identifier, type_, expr)) def for_expr(self): res = ParseResult() for_keyword_token = self.current_token res.register(self.increment()) if self.current_token.type != TT_IDENTIFIER: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected identifier (after 'for')" )) var_name = self.current_token res.register(self.increment()) if self.current_token.type != TT_EQ: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected '=' (after identifier)" )) res.register(self.increment()) start_value = res.register(self.atom()) if res.error: return res if not (self.current_token.type == TT_KEYWORD and self.current_token.value == 'to'): return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected 'to' keyword (after identifier's starting point in for statement)" )) res.register(self.increment()) end_value = res.register(self.atom()) if res.error: return res if self.current_token.type == TT_KEYWORD and self.current_token.value == 'step': res.register(self.increment()) step_value = res.register(self.atom()) if res.error: return res else: step_value = None statements = res.register(self.code_block(( (TT_KEYWORD, 'end'), ), for_keyword_token.pos_start, for_keyword_token.pos_end )) if res.error: return res res.register(self.increment()) return res.success(ForNode( for_keyword_token, var_name, start_value, end_value, step_value, statements )) def while_expr(self): res = ParseResult() while_keyword_token = self.current_token res.register(self.increment()) condition = res.register(self.atom()) if res.error: return res statements = res.register(self.code_block(( (TT_KEYWORD, 'end'), ), while_keyword_token.pos_start, while_keyword_token.pos_end )) if res.error: return res res.register(self.increment()) return res.success(WhileNode( while_keyword_token, condition, statements )) def statement(self): res = ParseResult() if self.current_token.type == 
TT_KEYWORD and self.current_token.value == 'if': statement = res.register(self.if_expr()) elif self.current_token.type == TT_KEYWORD and self.current_token.value == 'choice': statement = res.register(self.type_choice()) elif self.current_token.type == TT_KEYWORD and self.current_token.value == 'for': statement = res.register(self.for_expr()) elif self.current_token.type == TT_KEYWORD and self.current_token.value == 'while': statement = res.register(self.while_expr()) else: statement = res.register(self.expr()) if res.error: return res return res.success(StatementNode(statement)) def code(self): res = ParseResult() statements = res.register(self.code_block((TT_EOF,),)) if res.error: return res return res.success(CodeNode(statements)) ####################################################### def bin_op(self, func_a, ops, func_b=None): if func_b is None: func_b = func_a res = ParseResult() left = res.register(func_a()) if res.error: return res while self.current_token.type in ops or (self.current_token.type, self.current_token.value) in ops: op_token = self.current_token res.register(self.increment()) right = res.register(func_b()) if res.error: return res left = BinOpNode(left, op_token, right) return res.success(left) def code_block(self, ends, pos_start=None, pos_end=None): statements = [] res = ParseResult() if self.current_token.type == TT_NEWLINE: res.register(self.increment()) statements.append(res.register(self.statement())) if res.error: return res while True: if self.current_token.type in ends or (self.current_token.type, self.current_token.value) in ends: break try: if self.tokens[self.token_index+1].type in ends or (self.tokens[self.token_index+1].type, self.tokens[self.token_index+1].value) in ends: break except IndexError: if self.current_token.type == TT_EOF: return res.failure(InvalidSyntaxError( pos_start, pos_end, "This code-block starting have no ending" )) if self.current_token.type != TT_NEWLINE: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected '\\n' or ';'" )) res.register(self.increment()) statements.append(res.register(self.statement())) if res.error: return res if self.current_token.type == TT_NEWLINE: res.register(self.increment()) return res.success(statements) ### Nodes ### class IntNode: def __init__(self, value: int): self.value = value self.type = 'int' def __repr__(self): return f'{self.value}' class DoubleNode: def __init__(self, value: float): self.value = value self.type = 'double' def __repr__(self): return f'{self.value}' class TrueNode: def __init__(self): self.value = 1 self.type = '_Bool' def __repr__(self): return f'{self.value}' class FalseNode: def __init__(self): self.value = 0 self.type = '_Bool' def __repr__(self): return f'{self.value}' class CStringNode: def __init__(self, value): self.value = value self.type = 'str' self.str_length = len(eval(f'"{value}"'))+1 def __repr__(self): return f'"{self.value}"' class SymbolNode: def __init__(self, symbols=None, name=None, branch=None, type_=None, for_var=False): if for_var: self.for_var = True self.name = name self.type = type_ else: self.for_var = False self.symbols = symbols self.name = name self.branch = branch.copy() self.type = self.symbols[self.name].type self.symbols[self.name].usage_count += 1 self.symbol_usage = self.symbols[self.name].usage_count self.type_choice = self.symbols[self.name].type_choice if isinstance(self.type, list) and self.type_choice is not None: self.type = self.type_choice if self.type == 'str': self.str_length = 
self.symbols[name].str_length

    def __repr__(self):
        if self.for_var:
            return f'{self.name}_i'
        self.symbol = self.symbols[self.name]
        if isinstance(self.symbol, Variable):
            return f'{self.symbol.name}_'
        if isinstance(self.type, list) or self.type == 'str':
            result = f'identifiers[{self.symbol.identifier}]'
        else:
            result = f'*({self.type}*)identifiers[{self.symbol.identifier}]'
        if self.symbol_usage == self.symbol.usage_count:
            if self.branch:
                return result + f'\\*late-after:free(identifiers[{self.symbol.identifier}])*\\'
            return result + f'\\*after:free(identifiers[{self.symbol.identifier}])*\\'
        return result

class SymbolAssignNode:
    def __init__(self, symbols, name, node, type_change=False, branch_init=False):
        self.symbols = symbols
        self.name = name
        self.node = node
        self.type_change = type_change
        self.branch_init = branch_init
        self.type = node.type
        self.symbol_type = self.symbols[self.name].type
        self.assign_count = self.symbols[self.name].assign_count
        self.usage_till_now = self.symbols[self.name].usage_count
        if self.type == 'str':
            self.symbols[self.name].str_length = node.str_length

    def __repr__(self):
        self.symbol = self.symbols[self.name]
        if self.symbol.usage_count - self.usage_till_now == 0:
            return f'{self.node}'
        if isinstance(self.symbol, Variable):
            return f'{self.symbol.name}_ = {self.node}'
        if self.type == 'str':
            core_name = f'strcpy(identifiers[{self.symbol.identifier}], {self.node})'
            core_size = f'sizeof(char)*{self.node.str_length}'
            self.type_change = True
        else:
            core_name = f'*({self.type}*)identifiers[{self.symbol.identifier}] = {self.node}'
            core_size = f'sizeof({self.type})'
        if self.branch_init:
            return f'{core_name}\\*before:identifiers[{self.symbol.identifier}] = realloc(identifiers[{self.symbol.identifier}], {core_size});after:{self.symbol.name}_type = {self.symbol.type.index(self.node.type)};pre-before:int {self.symbol.name}_type = 0;pre-before:identifiers[{self.symbol.identifier}] = 0*\\'
        if self.assign_count == 1:
            if isinstance(self.symbol.type, list):
                return f'{core_name}\\*before:identifiers[{self.symbol.identifier}] = malloc({core_size});before:int {self.symbol.name}_type = {self.symbol_type.index(self.type)}*\\'
            return f'{core_name}\\*before:identifiers[{self.symbol.identifier}] = malloc({core_size})*\\'
        if self.type_change:
            if isinstance(self.symbol_type, list):
                return f'{core_name}\\*before:identifiers[{self.symbol.identifier}] = realloc(identifiers[{self.symbol.identifier}], {core_size});after:{self.symbol.name}_type = {self.symbol.type.index(self.node.type)}*\\'
            return f'{core_name}\\*before:identifiers[{self.symbol.identifier}] = realloc(identifiers[{self.symbol.identifier}], {core_size})*\\'
        return core_name

class NegateNode:
    def __init__(self, node):
        self.node = node
        self.type = self.node.type

    def __repr__(self):
        return f'-({self.node})'

class AddNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        # the result widens to double if either operand's type is double
        if self.left_node.type == 'double' or self.right_node.type == 'double':
            self.type = 'double'
        else:
            self.type = 'int'

    def __repr__(self):
        return f'({self.left_node})+({self.right_node})'

class SubtractNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        if self.left_node.type == 'double' or self.right_node.type == 'double':
            self.type = 'double'
        else:
            self.type = 'int'

    def __repr__(self):
        return f'({self.left_node})-({self.right_node})'

class MultiplyNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        if self.left_node.type == 'double' or self.right_node.type == 'double':
            self.type = 'double'
        else:
            self.type = 'int'

    def __repr__(self):
        return f'({self.left_node})*({self.right_node})'

class DivideNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        self.type = 'double'

    def __repr__(self):
        return f'({self.left_node})/(double)({self.right_node})'

class FunctionCallNode:
    def __init__(self, name, type_, arguments):
        self.name = name
        self.type = type_
        self.arguments = arguments

    def __repr__(self):
        arg_str = ''
        first = True
        for argument in self.arguments:
            if first:
                arg_str += f'{argument}'
            else:
                arg_str += f', {argument}'
            first = False
        return f'({self.type}){self.name}({arg_str})'

class EqualNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        self.type = '_Bool'

    def __repr__(self):
        return f'({self.left_node})==({self.right_node})'

class NotEqualNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        self.type = '_Bool'

    def __repr__(self):
        return f'({self.left_node})!=({self.right_node})'

class LessThanNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        self.type = '_Bool'

    def __repr__(self):
        return f'({self.left_node})<({self.right_node})'

class GreaterThanNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        self.type = '_Bool'

    def __repr__(self):
        return f'({self.left_node})>({self.right_node})'

class LessThanEqualNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        self.type = '_Bool'

    def __repr__(self):
        return f'({self.left_node})<=({self.right_node})'

class GreaterThanEqualNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        self.type = '_Bool'

    def __repr__(self):
        return f'({self.left_node})>=({self.right_node})'

class AndNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        self.type = '_Bool'

    def __repr__(self):
        return f'({self.left_node})&&({self.right_node})'

class OrNode:
    def __init__(self, left_node, right_node):
        self.left_node = left_node
        self.right_node = right_node
        self.type = '_Bool'

    def __repr__(self):
        return f'({self.left_node})||({self.right_node})'

class NotNode:
    def __init__(self, node):
        self.node = node
        self.type = '_Bool'

    def __repr__(self):
        return f'!({self.node})'

class CodeBlock:
    def commandize(cmds, statement_str):
        result = statement_str
        for cmd in cmds:
            cmd = cmd.replace('\\*', '')
            cmd = cmd.replace('*\\', '')
            for command in cmd.split(';'):
                if not command:
                    continue
                head, line = command.split(':')
                if head == 'before':
                    result = f'{line};\n{result}'
                if head == 'after':
                    result = f'{result};\n{line}'
                if head == 'pre-before':
                    result += f'\\*before:{line}*\\'
                if head == 'late-after':
                    result += f'\\*after:{line}*\\'
        result += ';\n'
        return result

    def code_block(statements):
        result = ''
        for statement in statements:
            statement_str = statement.__repr__()
            if len(statement_str) < 2:
                result += statement_str + ';\n'
                continue
            cmd = []
            while '\\*' in statement_str:
                tmpi = statement_str.find('\\*')
                tmp = statement_str[tmpi:statement_str.find('*\\', tmpi)+2]
                statement_str = statement_str.replace(tmp, '')
                cmd.append(tmp)
            result += CodeBlock.commandize(cmd, statement_str)
        return result

class CIfNode:
    def __init__(self, if_value, elif_values=[], else_value=None):
        self.if_value = if_value
        self.elif_values = elif_values
        self.else_value = else_value

    def
__repr__(self): result = f'if ({self.if_value[0]}){{\n' result += CodeBlock.code_block(self.if_value[1]) result += '}' for elif_value in self.elif_values: result += f'else if ({elif_value[0]}){{\n' result += CodeBlock.code_block(elif_value[1]) result += '}' if self.else_value: result += 'else{\n' result += CodeBlock.code_block(self.else_value) result += '}' return result class CForNode: def __init__(self, identifier_name, var_type, start_value, end_value, step_value, statements): self.identifier_name = identifier_name self.var_type = var_type self.start_value = start_value self.end_value = end_value self.step_value = step_value self.statements = statements def __repr__(self): result = 'for (' result += f'{self.var_type} {self.identifier_name}_i = {self.start_value}; ' result += f'{self.identifier_name}_i < {self.end_value}; ' if self.step_value == 1: result += f'{self.identifier_name}_i++' else: result += f'{self.identifier_name}_i+={self.step_value}' result += '){\n' result += CodeBlock.code_block(self.statements) result += '}' return result class CWhileNode: def __init__(self, condition, statements): self.condition = condition self.statements = statements def __repr__(self): result = f'while ({self.condition}){{\n' result += CodeBlock.code_block(self.statements) result += '}' return result class CStatementNode: def __init__(self, node): self.node = node def __repr__(self): return f'{self.node}' class GlobalVariableNode: def __init__(self, name, type_): self.name = name self.type = type_ def __repr__(self): return f'{self.type} {self.name};\n' class CCodeNode: def __init__(self, libraries, identifier_count, global_variables, statement_nodes): self.libraries = libraries self.identifier_count = identifier_count self.global_variables = global_variables self.statement_nodes = statement_nodes def commandize(self, cmds, statement_str): result = statement_str for cmd in cmds: cmd = cmd.replace('\\*', '') cmd = cmd.replace('*\\', '') for command in cmd.split(';'): if not command: continue head, line = command.split(':') if head == 'before': result = f'{line};\n{result}' if head == 'after': result = f'{result};\n{line}' result += ';\n' return result def __repr__(self): result = '' for library in self.libraries: result += f'{library}' if self.identifier_count: result += f'\nvoid* identifiers[{self.identifier_count}];\n' for variable in self.global_variables: if variable.usage_count: result += f'{variable.type} {variable.name}_;\n' result += 'int main(){\n' for statement in self.statement_nodes: statement_str = statement.__repr__() if len(statement_str) < 2: result += statement_str + ';\n' continue cmd = [] while '\\*' in statement_str: tmpi = statement_str.find('\\*') tmp = statement_str[tmpi:statement_str.find('*\\', tmpi)+2] statement_str = statement_str.replace(tmp, '') cmd.append(tmp) result += self.commandize(cmd, statement_str) result += 'return 0;\n}' return result ### AnalizeResult ### class AnalizeResult: def __init__(self): self.value = None self.error = None def register(self, res): if res.error: self.error = res.error return res.value def success(self, value): self.value = value return self def failure(self, error): self.error = error return self class Symbol: def __init__(self, name, type_): self.name = name self.type = type_ self.assign_count = 0 self.usage_count = 0 self.manual_static = False self.if_thingy = 0 self.is_branching = False self.type_choice = None class Identifier(Symbol): def __init__(self, name, type_, identifier, start_assign_count=0, start_usage_count=0, 
type_choice=None, if_thingy=0, is_branching=False): super().__init__(name, type_) self.identifier = identifier self.first_type = self.type self.assign_count = start_assign_count self.usage_count = start_usage_count self.if_thingy = if_thingy self.is_branching = is_branching self.type_choice = type_choice class Variable(Symbol): def __init__(self, name, type_, manual_static): super().__init__(name, type_) self.manual_static = manual_static def convert_to_identifier(self, identifier): if self.manual_static: return return Identifier(self.name, self.type, identifier, self.assign_count, self.usage_count, self.type_choice, self.if_thingy, self.is_branching) ### SymbolTable ### class SymbolTable: BRANCH_IF = 0 BRANCH_ELSE = 1 BRANCH_WHILE = 2 BRANCH_FOR = 3 def __init__(self): self.branchs = [] self.branch_count = 0 self.symbols = {} self.identifier_count = 0 self.global_variables = [] self.for_refrence = None def symbol_get(self, name, node): res = AnalizeResult() if self.for_refrence is not None: if name in self.for_refrence: return res.success(SymbolNode(for_var=True, name=name, type_=self.for_refrence[1])) if name in self.symbols.keys(): return res.success(SymbolNode(self.symbols, name, self.branchs)) return res.failure( NameError_(node.pos_start, node.pos_end, f"Name '{name}' is not defined") ) def symbol_assign(self, name, node, value_node, manual_static, libraries): res = AnalizeResult() if name in self.symbols.keys(): symbol = self.symbols[name] if isinstance(symbol, Identifier) and manual_static: return res.failure( TypeError_(node.manual_static_token.pos_start, node.manual_static_token.pos_end, "Can't make a dynamic variable static" )) if value_node.type == symbol.type and not self.branchs: if manual_static: symbol.manual_static = True symbol.assign_count += 1 return res.success(SymbolAssignNode(self.symbols, name, value_node)) if symbol.manual_static or manual_static: return res.failure( TypeError_(node.pos_start, node.pos_end, f"Can't change static variables's type" )) if isinstance(symbol, Variable) and value_node.type != symbol.type: self.global_variables.remove(symbol) symbol = symbol.convert_to_identifier(self.identifier_count) self.symbols[name] = symbol self.identifier_count += 1 libraries.add('#include<stdlib.h>\n') if self.branchs: if self.branchs[-1] in (SymbolTable.BRANCH_IF, SymbolTable.BRANCH_FOR, SymbolTable.BRANCH_WHILE) : if value_node.type != symbol.type: if isinstance(symbol.type, list): if value_node.type not in symbol.type: symbol.type.append(value_node.type) if value_node.type == symbol.type[0]: symbol.is_branching = True else: symbol.if_thingy = symbol.type symbol.type = [symbol.type, value_node.type] else: symbol.is_branching = True symbol.if_thingy = symbol.type elif self.branchs[-1] == SymbolTable.BRANCH_ELSE: if isinstance(symbol.type, list): if value_node.type != symbol.if_thingy: if value_node.type not in symbol.type: symbol.type[0] = value_node.type elif not symbol.is_branching: symbol.type.remove(symbol.type[0]) else: symbol.type = [symbol.type, value_node.type] else: symbol.type = value_node.type if len(set(symbol.type)) == 1: symbol.type = symbol.type[0] symbol.assign_count += 1 return res.success(SymbolAssignNode(self.symbols, name, value_node, type_change=True)) if self.branchs: if self.branchs[-1] == SymbolTable.BRANCH_WHILE or self.branchs[-1] == SymbolTable.BRANCH_FOR: return res.failure(NameError_( node.pos_start, node.pos_end, f"Name '{name}' is not defined, hint: can't initiate identifiers inside a loop" )) print(CompileTimeWarnning( 
node.pos_start, node.pos_end, f"Should not initiate identifiers inside a branch, could cause None type identifiers" ).as_string()) symbol = Identifier(name, None, self.identifier_count) self.symbols[name] = symbol self.identifier_count += 1 libraries.add('#include<stdlib.h>\n') if self.branchs[-1] == SymbolTable.BRANCH_IF: symbol.type = [symbol.type, value_node.type] symbol.if_thingy == self.branch_count else: symbol.type = [symbol.type, value_node.type] symbol.assign_count += 1 if value_node.type == 'str': libraries.add('#include<stdlib.h>\n') libraries.add('#include<string.h>\n') return res.success(SymbolAssignNode(self.symbols, name, value_node, branch_init=True)) if value_node.type == 'str': libraries.add('#include<stdlib.h>\n') libraries.add('#include<string.h>\n') symbol = Identifier(name, value_node.type, self.identifier_count) self.identifier_count += 1 else: symbol = Variable(name, value_node.type, manual_static) self.global_variables.append(symbol) self.symbols[name] = symbol symbol.assign_count += 1 return res.success(SymbolAssignNode(self.symbols, name, value_node)) def symbol_type_choice(self, name, type_, node=None): type_map = {'int':'int', 'bool':'_Bool', 'float':'double'} res = AnalizeResult() if node is not None: if name not in self.symbols.keys(): return res.failure( NameError_(node.pos_start, node.pos_end, f"Name '{name}' is not defined") ) symbol = self.symbols[name] if not isinstance(symbol.type, list): return res.failure( TypeError_(node.pos_start, node.pos_end, "Can't type choice on single branch variables") ) if type_map[type_.value] not in symbol.type: return res.failure( TypeError_(type_.pos_start, type_.pos_end, f"Identifier '{name}' does not have '{type_.value}' type branch") ) self.symbols[name].type_choice = type_map[type_.value] return res.success(SymbolNode(self.symbols, name, self.branchs)) self.symbols[name].type_choice = type_ return res.success(SymbolNode(self.symbols, name, self.branchs)) def symbol_type_choice_end(self, name): self.symbols[name].type_choice = None def start_if_branch(self): self.branch_count += 1 self.branchs.append(SymbolTable.BRANCH_IF) def start_elif_branch(self): self.branchs.append(SymbolTable.BRANCH_IF) def start_else_branch(self): self.branchs.append(SymbolTable.BRANCH_ELSE) def start_while_branch(self): self.branchs.append(SymbolTable.BRANCH_WHILE) def start_for_branch(self, identifier_name, type_): self.for_refrence = identifier_name, type_ self.branchs.append(SymbolTable.BRANCH_FOR) def end_branch(self): self.for_refrence = None self.branchs.pop() ### Analizer ### class Analizer: def __init__(self): self.libraries = set() def visit(self, node): method_name = f'visit_{type(node).__name__}' method = getattr(self, method_name, self.no_visit_node) return method(node) def no_visit_node(self, node): raise Exception(f'No visit_{type(node).__name__} method defined.') def visit_NumberNode(self, node): res = AnalizeResult() if type(node.token.value) is int: return res.success(IntNode(node.token.value)) if type(node.token.value) is float: return res.success(DoubleNode(node.token.value)) def visit_StringNode(self, node): return AnalizeResult().success(CStringNode(node.token.value)) def visit_KeywordValueNode(self, node): res = AnalizeResult() if node.token.value == 'True': return res.success(TrueNode()) if node.token.value == 'False': return res.success(FalseNode()) def visit_IdentifierNode(self, node): return self.symbol_table.symbol_get(node.id_name_token.value, node) def visit_IdAssignNode(self, node): res = AnalizeResult() 
value_node = res.register(self.visit(node.value_node)) if res.error: return res if isinstance(value_node.type, list): if_expr = [] elif_expr = [] if None in value_node.type: value_node.type.remove(None) for i, value_type in enumerate(value_node.type): if not i: self.symbol_table.start_if_branch() symbol = res.register(self.symbol_table.symbol_get(value_node.name, node)) if res.error: return res if_expr.append( EqualNode( f'{symbol.name}_type', IntNode(symbol.type.index(value_type)))) res.register(self.symbol_table.symbol_type_choice(value_node.name, value_type)) if_expr.append([res.register(self.symbol_table.symbol_assign( node.id_name_token.value, node, res.register(self.visit(node.value_node)), node.manual_static, self.libraries))]) self.symbol_table.end_branch() else: tmpelif_expr = [] self.symbol_table.start_elif_branch() symbol = res.register(self.symbol_table.symbol_get(value_node.name, node)) if res.error: return res tmpelif_expr.append( EqualNode( f'{symbol.name}_type', IntNode(symbol.type.index(value_type)))) res.register(self.symbol_table.symbol_type_choice(value_node.name, value_type)) tmpelif_expr.append([res.register(self.symbol_table.symbol_assign( node.id_name_token.value, node, res.register(self.visit(node.value_node)), node.manual_static, self.libraries))]) elif_expr.append(tmpelif_expr) self.symbol_table.end_branch() if res.error: return res self.symbol_table.symbol_type_choice_end(value_node.name) return res.success(CIfNode(if_expr, elif_expr)) name = node.id_name_token.value answer = self.symbol_table.symbol_assign(name, node, value_node, node.manual_static, self.libraries) return answer def visit_IfNode(self, node): res = AnalizeResult() if_value_node = [] self.symbol_table.start_if_branch() if_value_node.append(res.register(self.visit(node.if_value[0]))) if res.error: return res if_statement_nodes = [] for statement in node.if_value[1]: if_statement_nodes.append(res.register(self.visit(statement))) if res.error: return res if_value_node.append(if_statement_nodes) self.symbol_table.end_branch() elif_value_nodes = [] for elif_value in node.elif_values: self.symbol_table.start_elif_branch() elif_expr, elif_statements = elif_value elif_value_node = [] elif_value_node.append(res.register(self.visit(elif_expr))) if res.error: return res elif_statement_nodes = [] for statement in elif_statements: elif_statement_nodes.append(res.register(self.visit(statement))) if res.error: return res elif_value_node.append(elif_statement_nodes) elif_value_nodes.append(elif_value_node) self.symbol_table.end_branch() else_value_node = [] if node.else_value is not None: self.symbol_table.start_else_branch() for statement in node.else_value: else_value_node.append(res.register(self.visit(statement))) if res.error: return res self.symbol_table.end_branch() return res.success(CIfNode(if_value_node, elif_value_nodes, else_value_node)) def visit_TypeChoiceNode(self, node): map_type = {'int':'int', 'bool':'_Bool', 'float':'double'} res = AnalizeResult() symbol = res.register(self.symbol_table.symbol_get(node.identifier.value, node)) if res.error: return res if_expr = [] res.register(self.symbol_table.symbol_type_choice(node.identifier.value, node.type, node)) if res.error: return res if_expr.append(EqualNode( f'{symbol.name}_type', IntNode(symbol.type.index(map_type[node.type.value])) )) self.symbol_table.start_if_branch() expr = res.register(self.visit(node.expr)) if res.error: return res if_expr.append([expr]) self.symbol_table.symbol_type_choice_end(node.identifier.value) 
        self.symbol_table.end_branch()
        return res.success(CIfNode(if_expr))

    def visit_ForNode(self, node):
        res = AnalizeResult()
        identifier_name = node.var_name_token.value
        start_value = res.register(self.visit(node.start_value_node))
        if res.error:
            return res
        end_value = res.register(self.visit(node.end_value_node))
        if res.error:
            return res
        step_value = res.register(self.visit(node.step_value_node)) if node.step_value_node else 1
        if res.error:
            return res
        # the loop variable's C type follows the types of the visited bound nodes
        var_type = 'int' if start_value.type == 'int' and end_value.type == 'int' else 'float'
        self.symbol_table.start_for_branch(identifier_name, var_type)
        statements = []
        for statement in node.statements:
            statements.append(res.register(self.visit(statement)))
            if res.error:
                return res
        self.symbol_table.end_branch()
        return res.success(CForNode(
            identifier_name, var_type, start_value, end_value, step_value, statements
        ))

    def visit_WhileNode(self, node):
        res = AnalizeResult()
        condition = res.register(self.visit(node.condition_node))
        if res.error:
            return res
        self.symbol_table.start_while_branch()
        statements = []
        for statement in node.statements:
            statements.append(res.register(self.visit(statement)))
            if res.error:
                return res
        self.symbol_table.end_branch()
        return res.success(CWhileNode(
            condition, statements
        ))

    def visit_BinOpNode(self, node):
        res = AnalizeResult()
        left_node = res.register(self.visit(node.left_node))
        if res.error:
            return res
        right_node = res.register(self.visit(node.right_node))
        if res.error:
            return res
        if node.op_token.type == TT_PLUS:
            return res.success(AddNode(left_node, right_node))
        if node.op_token.type == TT_MINUS:
            return res.success(SubtractNode(left_node, right_node))
        if node.op_token.type == TT_MUL:
            return res.success(MultiplyNode(left_node, right_node))
        if node.op_token.type == TT_DIV:
            return res.success(DivideNode(left_node, right_node))
        if node.op_token.type == TT_POW:
            type_ = 'double' if left_node.type == 'double' or right_node.type == 'double' else 'int'
            self.libraries.add('#include<math.h>\n')
            return res.success(FunctionCallNode('pow', type_, (left_node, right_node)))
        if node.op_token.type == TT_EE:
            return res.success(EqualNode(left_node, right_node))
        if node.op_token.type == TT_NE:
            return res.success(NotEqualNode(left_node, right_node))
        if node.op_token.type == TT_LT:
            return res.success(LessThanNode(left_node, right_node))
        if node.op_token.type == TT_GT:
            return res.success(GreaterThanNode(left_node, right_node))
        if node.op_token.type == TT_LTE:
            return res.success(LessThanEqualNode(left_node, right_node))
        if node.op_token.type == TT_GTE:
            return res.success(GreaterThanEqualNode(left_node, right_node))
        if node.op_token.type == TT_KEYWORD and node.op_token.value == 'and':
            return res.success(AndNode(left_node, right_node))
        if node.op_token.type == TT_KEYWORD and node.op_token.value == 'or':
            return res.success(OrNode(left_node, right_node))

    def visit_UnaryOpNode(self, node):
        res = AnalizeResult()
        value_node = res.register(self.visit(node.node))
        if res.error:
            return res
        if node.op_token.type == TT_PLUS:
            return res.success(value_node)
        if node.op_token.type == TT_MINUS:
            return res.success(NegateNode(value_node))
        if node.op_token.type == TT_KEYWORD and node.op_token.value == 'not':
            return res.success(NotNode(value_node))

    def visit_StatementNode(self, node):
        res = AnalizeResult()
        statement_node = res.register(self.visit(node.statement_node))
        if res.error:
            return res
        return res.success(CStatementNode(statement_node))

    def visit_CodeNode(self, node):
        self.symbol_table = SymbolTable()
        res = AnalizeResult()
        statement_nodes = []
        for statement in
node.statements:
            statement_nodes.append(res.register(self.visit(statement)))
            if res.error:
                return res
        return res.success(CCodeNode(self.libraries, self.symbol_table.identifier_count, self.symbol_table.global_variables, statement_nodes))

### RUN ###

def lex(file_name, context):
    # Lexing
    lexer = Lexer(file_name, context)
    return lexer.make_tokens()

def parse(tokens):
    # Parsing
    parser = Parser(tokens)
    return parser.parser()

def analize(abstractSyntaxTree):
    # Analyzing
    analizer = Analizer()
    return analizer.visit(abstractSyntaxTree.node)

def run(file_name, context):
    tokens, error = lex(file_name, context)
    if error:
        return None, error
    ast = parse(tokens)
    if ast.error:
        return None, ast.error
    c = analize(ast)
    if c.error:
        return None, c.error
    return c.value, None

def open_code(file_name):
    try:
        with open(file_name, 'r') as f:
            context = f.read()
        return context
    except FileNotFoundError as exception:
        print(exception)
        sys.exit()

def main():
    if len(sys.argv) < 2:
        print("""Please provide any arguments:
    compile <file_name>
    run <file_name>
    c <file_name>
    lex <file_name>
    parse <file_name>
""")
        sys.exit()
    if len(sys.argv) < 3:
        print("Please also provide the file path of the script :)")
        sys.exit()

    cmd = sys.argv[1].lower()
    file_name = sys.argv[2]
    context = open_code(file_name)

    if cmd == 'compile':
        result, error = run(file_name, context)
        if error:
            print(error.as_string())
        else:
            xfile = file_name.split('.')[0]
            with open(xfile + '.c', 'w') as f:
                f.write(result.__repr__())
            try:
                # pass gcc its arguments as a list and wait for it to finish
                subprocess.run(["gcc", "-O2", f"{xfile}.c", "-o", f"{xfile}.exe"])
            except FileNotFoundError:
                print("gcc is not installed, you need the gcc compiler to use this program ;(")
    elif cmd == 'run':
        result, error = run(file_name, context)
        if error:
            print(error.as_string())
        else:
            xfile = file_name.split('.')[0]
            with open(xfile + '.c', 'w') as f:
                f.write(result.__repr__())
            try:
                # wait for gcc to finish before executing the produced binary
                subprocess.run(["gcc", "-O2", f"{xfile}.c", "-o", f"{xfile}.exe"])
                subprocess.run([f"./{xfile}.exe"])
            except FileNotFoundError:
                print("gcc is not installed, you need the gcc compiler to use this program ;(")
    elif cmd == 'c':
        result, error = run(file_name, context)
        if error:
            print(error.as_string())
        else:
            print(result)
    elif cmd == 'lex':
        result, error = lex(file_name, context)
        if error:
            print(error.as_string())
        else:
            print(result)
    elif cmd == 'parse':
        tokens, error = lex(file_name, context)
        if error:
            print(error.as_string())
        else:
            ast = parse(tokens)
            if ast.error:
                print(ast.error.as_string())
            else:
                print(ast.node)
    else:
        print(f"""Invalid argument {cmd} :(
Please provide one of the valid arguments:
    compile <file_name>
    run <file_name>
    c <file_name>
    lex <file_name>
    parse <file_name>
""")
        sys.exit()

if __name__ == "__main__":
    main()
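# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the compiler): drives the
# lex -> parse -> analize pipeline above through run() on an in-memory
# program, the same way main() does for a file. The name "<demo>" and the
# two-assignment source string are invented for illustration; run() only
# uses the name in error messages.
# ---------------------------------------------------------------------------
source = "x = 1 + 2; y = x * 10"
c_code, error = run("<demo>", source)
# c_code is a CCodeNode; its repr() is the generated C translation unit
print(error.as_string() if error else c_code)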
self.expr.pos_end def __repr__(self): return f"TypeChoice({self.identifier}, {self.type}, {self.expr})" class ForNode: def __init__(self, for_keyword_token, var_name_token, start_value_node, end_value_node, step_value_node, statements): self.for_keyword_token = for_keyword_token self.var_name_token = var_name_token self.start_value_node = start_value_node self.end_value_node = end_value_node self.step_value_node = step_value_node self.statements = statements self.pos_start = self.for_keyword_token.pos_start self.pos_end = self.step_value_node.pos_end if self.step_value_node else self.end_value_node.pos_end def __repr__(self): return f'for({self.var_name_token}={self.start_value_node, self.end_value_node}, {self.step_value_node}, {self.statements})' class WhileNode: def __init__(self, while_keyword_token, condition_node, statements): self.while_keyword_token = while_keyword_token self.condition_node = condition_node self.statements = statements self.pos_start = self.while_keyword_token.pos_start self.pos_end = self.condition_node.pos_end def __repr__(self): return f'while({self.conditioin_node}, {self.statements})' class BinOpNode: def __init__(self, left_node, op_token, right_node): self.left_node = left_node self.op_token = op_token self.right_node = right_node self.pos_start = self.left_node.pos_start self.pos_end = self.right_node.pos_end def __repr__(self): return f"({self.left_node}, {self.op_token}, {self.right_node})" class UnaryOpNode: def __init__(self, op_token, node): self.op_token = op_token self.node = node self.pos_start = self.op_token.pos_start self.pos_end = self.node.pos_end def __repr__(self): return f"({self.op_token}, {self.node})" class StatementNode: def __init__(self, statement_node): self.statement_node = statement_node self.pos_start = statement_node.pos_start self.pos_end = statement_node.pos_end def __repr__(self): return f'{self.statement_node}' class CodeNode: def __init__(self, statements): self.statements = statements def __repr__(self): return f'{self.statements}' ### PARSE RESULT class ParseResult: def __init__(self): self.error = None self.node = None def register(self, res): if isinstance(res, ParseResult): if res.error: self.error = res.error return res.node return res def success(self, node): self.node = node return self def failure(self, error): self.error = error return self ### PARSER ### class Parser: def __init__(self, tokens): self.tokens = tokens self.token_index = -1 self.increment() def increment(self): self.token_index += 1 if self.token_index < len(self.tokens): self.current_token = self.tokens[self.token_index] return self.current_token def parser(self): res = self.code() return res def atom(self): res = ParseResult() tok = self.current_token if tok.type in (TT_INT, TT_FLOAT): res.register(self.increment()) return res.success(NumberNode(tok)) if tok.type in TT_STRING: res.register(self.increment()) return res.success(StringNode(tok)) elif tok.type == TT_IDENTIFIER: res.register(self.increment()) return res.success(IdentifierNode(tok)) elif tok.type == TT_KEYWORD and tok.value == 'True': res.register(self.increment()) return res.success(KeywordValueNode(tok)) elif tok.type == TT_KEYWORD and tok.value == 'False': res.register(self.increment()) return res.success(KeywordValueNode(tok)) elif tok.type == TT_LPAREN: res.register(self.increment()) expr = res.register(self.expr()) if res.error: return res if self.current_token.type == TT_RPAREN: res.register(self.increment()) return res.success(expr) else: return res.failure(InvalidSyntaxError( 
self.current_token.pos_start, self.current_token.pos_end, "Expected ')'" )) return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected expression or '('" )) def power(self): return self.bin_op(self.atom, (TT_POW,), self.factor) def factor(self): res = ParseResult() tok = self.current_token if tok.type in (TT_PLUS, TT_MINUS): res.register(self.increment()) factor = res.register(self.factor()) if res.error: return res return res.success(UnaryOpNode(tok, factor)) return self.power() def term(self): return self.bin_op(self.factor, (TT_MUL, TT_DIV)) def arith_expr(self): return self.bin_op(self.term, (TT_PLUS, TT_MINUS)) def comp_expr(self): res = ParseResult() if self.current_token.type == TT_KEYWORD and self.current_token.value == 'not': op_token = self.current_token res.register(self.increment()) node = res.register(self.comp_expr()) if res.error: return res return res.success(UnaryOpNode(op_token, node)) node = res.register(self.bin_op(self.arith_expr, (TT_EE, TT_NE, TT_LT, TT_GT, TT_LTE, TT_GTE))) if res.error: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected expression, '(' or 'not'" )) return res.success(node) def identifier(self): res = ParseResult() manual_static = False manual_static_token = None if self.current_token.type == TT_KEYWORD and self.current_token.value == 'static': manual_static = True manual_static_token = self.current_token res.register(self.increment()) if self.current_token.type != TT_IDENTIFIER: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected valid identifier name" )) identifier_name = self.current_token res.register(self.increment()) if self.current_token.type != TT_EQ: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected '='" )) res.register(self.increment()) expr = res.register(self.expr()) if res.error: return res return res.success(IdAssignNode(identifier_name, expr, manual_static, manual_static_token)) def expr(self): res = ParseResult() if (self.current_token.type == TT_IDENTIFIER and self.tokens[self.token_index + 1].type == TT_EQ) or (self.current_token.type == TT_KEYWORD and self.current_token.value == 'static'): return res.success(res.register(self.identifier())) else: return self.bin_op(self.comp_expr, ((TT_KEYWORD, 'and'), (TT_KEYWORD, 'or'))) def if_expr(self): res = ParseResult() pos_start = self.current_token.pos_start if_token = self.current_token res.register(self.increment()) if_expr_value = [] if_expr_value.append(res.register(self.atom())) if res.error: return res if_expr_value.append(res.register( self.code_block(( (TT_KEYWORD, 'elif'), (TT_KEYWORD, 'else'), (TT_KEYWORD, 'end') ), if_token.pos_start, if_token.pos_end) )) if res.error: return res elif_expr_values = [] while self.current_token.type == TT_KEYWORD and self.current_token.value == 'elif': elif_expr_value = [] elif_token = self.current_token res.register(self.increment()) elif_expr_value.append(res.register(self.atom())) if res.error: return res elif_expr_value.append(res.register( self.code_block(( (TT_KEYWORD, 'elif'), (TT_KEYWORD, 'else'), (TT_KEYWORD, 'end') ), elif_token.pos_start, elif_token.pos_end) )) if res.error: return res elif_expr_values.append(elif_expr_value) else_expr_value = None if self.current_token.type == TT_KEYWORD and self.current_token.value == 'else': else_token = self.current_token res.register(self.increment()) else_expr_value = (res.register( 
self.code_block(( (TT_KEYWORD, 'end'), ), else_token.pos_start, else_token.pos_end) )) if res.error: return res pos_end = self.current_token.pos_end res.register(self.increment()) return res.success(IfNode(if_expr_value, elif_expr_values, else_expr_value, pos_start, pos_end)) def type_choice(self): res = ParseResult() res.register(self.increment()) if self.current_token.type != TT_IDENTIFIER: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected an identifier name" )) identifier = self.current_token res.register(self.increment()) if self.current_token.type != TT_LT: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected '<'" )) res.register(self.increment()) if self.current_token.type == TT_KEYWORD and self.current_token.value == 'int': type_ = self.current_token elif self.current_token.type == TT_KEYWORD and self.current_token.value == 'float': type_ = self.current_token elif self.current_token.type == TT_KEYWORD and self.current_token.value == 'bool': type_ = self.current_token else: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected 'int' or 'float'" )) res.register(self.increment()) if self.current_token.type != TT_GT: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected '>'" )) res.register(self.increment()) expr = res.register(self.statement()) if res.error: return res return res.success(TypeChoiceNode(identifier, type_, expr)) def for_expr(self): res = ParseResult() for_keyword_token = self.current_token res.register(self.increment()) if self.current_token.type != TT_IDENTIFIER: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected identifier (after 'for')" )) var_name = self.current_token res.register(self.increment()) if self.current_token.type != TT_EQ: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected '=' (after identifier)" )) res.register(self.increment()) start_value = res.register(self.atom()) if res.error: return res if not (self.current_token.type == TT_KEYWORD and self.current_token.value == 'to'): return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected 'to' keyword (after identifier's starting point in for statement)" )) res.register(self.increment()) end_value = res.register(self.atom()) if res.error: return res if self.current_token.type == TT_KEYWORD and self.current_token.value == 'step': res.register(self.increment()) step_value = res.register(self.atom()) if res.error: return res else: step_value = None statements = res.register(self.code_block(( (TT_KEYWORD, 'end'), ), for_keyword_token.pos_start, for_keyword_token.pos_end )) if res.error: return res res.register(self.increment()) return res.success(ForNode( for_keyword_token, var_name, start_value, end_value, step_value, statements )) def while_expr(self): res = ParseResult() while_keyword_token = self.current_token res.register(self.increment()) condition = res.register(self.atom()) if res.error: return res statements = res.register(self.code_block(( (TT_KEYWORD, 'end'), ), while_keyword_token.pos_start, while_keyword_token.pos_end )) if res.error: return res res.register(self.increment()) return res.success(WhileNode( while_keyword_token, condition, statements )) def statement(self): res = ParseResult() if self.current_token.type == 
TT_KEYWORD and self.current_token.value == 'if': statement = res.register(self.if_expr()) elif self.current_token.type == TT_KEYWORD and self.current_token.value == 'choice': statement = res.register(self.type_choice()) elif self.current_token.type == TT_KEYWORD and self.current_token.value == 'for': statement = res.register(self.for_expr()) elif self.current_token.type == TT_KEYWORD and self.current_token.value == 'while': statement = res.register(self.while_expr()) else: statement = res.register(self.expr()) if res.error: return res return res.success(StatementNode(statement)) def code(self): res = ParseResult() statements = res.register(self.code_block((TT_EOF,),)) if res.error: return res return res.success(CodeNode(statements)) ####################################################### def bin_op(self, func_a, ops, func_b=None): if func_b is None: func_b = func_a res = ParseResult() left = res.register(func_a()) if res.error: return res while self.current_token.type in ops or (self.current_token.type, self.current_token.value) in ops: op_token = self.current_token res.register(self.increment()) right = res.register(func_b()) if res.error: return res left = BinOpNode(left, op_token, right) return res.success(left) def code_block(self, ends, pos_start=None, pos_end=None): statements = [] res = ParseResult() if self.current_token.type == TT_NEWLINE: res.register(self.increment()) statements.append(res.register(self.statement())) if res.error: return res while True: if self.current_token.type in ends or (self.current_token.type, self.current_token.value) in ends: break try: if self.tokens[self.token_index+1].type in ends or (self.tokens[self.token_index+1].type, self.tokens[self.token_index+1].value) in ends: break except IndexError: if self.current_token.type == TT_EOF: return res.failure(InvalidSyntaxError( pos_start, pos_end, "This code-block starting have no ending" )) if self.current_token.type != TT_NEWLINE: return res.failure(InvalidSyntaxError( self.current_token.pos_start, self.current_token.pos_end, "Expected '\\n' or ';'" )) res.register(self.increment()) statements.append(res.register(self.statement())) if res.error: return res if self.current_token.type == TT_NEWLINE: res.register(self.increment()) return res.success(statements) ### Nodes ### class IntNode: def __init__(self, value: int): self.value = value self.type = 'int' def __repr__(self): return f'{self.value}' class DoubleNode: def __init__(self, value: float): self.value = value self.type = 'double' def __repr__(self): return f'{self.value}' class TrueNode: def __init__(self): self.value = 1 self.type = '_Bool' def __repr__(self): return f'{self.value}' class FalseNode: def __init__(self): self.value = 0 self.type = '_Bool' def __repr__(self): return f'{self.value}' class CStringNode: def __init__(self, value): self.value = value self.type = 'str' self.str_length = len(eval(f'"{value}"'))+1 def __repr__(self): return f'"{self.value}"' class SymbolNode: def __init__(self, symbols=None, name=None, branch=None, type_=None, for_var=False): if for_var: self.for_var = True self.name = name self.type = type_ else: self.for_var = False self.symbols = symbols self.name = name self.branch = branch.copy() self.type = self.symbols[self.name].type self.symbols[self.name].usage_count += 1 self.symbol_usage = self.symbols[self.name].usage_count self.type_choice = self.symbols[self.name].type_choice if isinstance(self.type, list) and self.type_choice is not None: self.type = self.type_choice if self.type == 'str': self.str_length = 
self.symbols[name].str_length def __repr__(self): if self.for_var: return f'{self.name}_i' self.symbol = self.symbols[self.name] if isinstance(self.symbol, Variable): return f'{self.symbol.name}_' if isinstance(self.type, list) or self.type=='str': result = f'identifiers[{self.symbol.identifier}]' else: result = f'*({self.type}*)identifiers[{self.symbol.identifier}]' if self.symbol_usage == self.symbol.usage_count: if self.branch: return result + f'\\*late-after:free(identifiers[{self.symbol.identifier}])*\\' return result + f'\\*after:free(identifiers[{self.symbol.identifier}])*\\' return result class SymbolAssignNode: def __init__(self, symbols, name, node, type_change=False, branch_init=False): self.symbols = symbols self.name = name self.node = node self.type_change = type_change self.branch_init = branch_init self.type = node.type self.symbol_type = self.symbols[self.name].type self.assign_count = self.symbols[self.name].assign_count self.usage_till_now = self.symbols[self.name].usage_count if self.type == 'str': self.symbols[self.name].str_length = node.str_length def __repr__(self): self.symbol = self.symbols[self.name] if self.symbol.usage_count - self.usage_till_now == 0: return f'{self.node}' if isinstance(self.symbol, Variable): return f'{self.symbol.name}_ = {self.node}' if self.type == 'str': core_name = f'strcpy(identifiers[{self.symbol.identifier}], {self.node})' core_size = f'sizeof(char)*{self.node.str_length}' self.type_change = True else: core_name = f'*({self.type}*)identifiers[{self.symbol.identifier}] = {self.node}' core_size = f'sizeof({self.type})' if self.branch_init: return f'{core_name}\\*before:identifiers[{self.symbol.identifier}] = realloc(identifiers[{self.symbol.identifier}], {core_size});after:{self.symbol.name}_type = {self.symbol.type.index(self.node.type)};pre-before:int {self.symbol.name}_type = 0;pre-before:identifiers[{self.symbol.identifier}] = 0*\\' if self.assign_count == 1: if isinstance(self.symbol.type, list): return f'{core_name}\\*before:identifiers[{self.symbol.identifier}] = malloc({core_size});before:int {self.symbol.name}_type = {self.symbol_type.index(self.type)}*\\' return f'{core_name}\\*before:identifiers[{self.symbol.identifier}] = malloc({core_size})*\\' if self.type_change: if isinstance(self.symbol_type, list): return f'{core_name}\\*before:identifiers[{self.symbol.identifier}] = realloc(identifiers[{self.symbol.identifier}], {core_size});after:{self.symbol.name}_type = {self.symbol.type.index(self.node.type)}*\\' return f'{core_name}\\*before:identifiers[{self.symbol.identifier}] = realloc(identifiers[{self.symbol.identifier}], {core_size})*\\' return core_name class NegateNode: def __init__(self, node): self.node = node self.type = self.node.type def __repr__(self): return f'-({self.node})' class AddNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node if self.left_node == 'double' or self.right_node == 'double': self.type = 'double' else: self.type = 'int' def __repr__(self): return f'({self.left_node})+({self.right_node})' class SubtractNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node if self.left_node == 'double' or self.right_node == 'double': self.type = 'double' else: self.type = 'int' def __repr__(self): return f'({self.left_node})-({self.right_node})' class MultiplyNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node if self.left_node == 'double' or self.right_node 
== 'double': self.type = 'double' else: self.type = 'int' def __repr__(self): return f'({self.left_node})*({self.right_node})' class DivideNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node self.type = 'double' def __repr__(self): return f'({self.left_node})/(double)({self.right_node})' class FunctionCallNode: def __init__(self, name, type_, arguments): self.name = name self.type = type_ self.arguments = arguments def __repr__(self): arg_str = '' first = True for argument in self.arguments: if first: arg_str += f'{argument}' else: arg_str += f', {argument}' first = False return f'({self.type}){self.name}({arg_str})' class EqualNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node self.type = '_Bool' def __repr__(self): return f'({self.left_node})==({self.right_node})' class NotEqualNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node def __repr__(self): return f'({self.left_node})!=({self.right_node})' class LessThanNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node self.type = '_Bool' def __repr__(self): return f'({self.left_node})<({self.right_node})' class GreaterThanNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node self.type = '_Bool' def __repr__(self): return f'({self.left_node})>({self.right_node})' class LessThanEqualNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node self.type = '_Bool' def __repr__(self): return f'({self.left_node})<=({self.right_node})' class GreaterThanEqualNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node self.type = '_Bool' def __repr__(self): return f'({self.left_node})>=({self.right_node})' class AndNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node self.type = '_Bool' def __repr__(self): return f'({self.left_node})&&({self.right_node})' class OrNode: def __init__(self, left_node, right_node): self.left_node = left_node self.right_node = right_node self.type = '_Bool' def __repr__(self): return f'({self.left_node})||({self.right_node})' class NotNode: def __init__(self, node): self.node = node self.type = '_Bool' def __repr__(self): return f'!({self.node})' class CodeBlock: def commandize(cmds, statement_str): result = statement_str for cmd in cmds: cmd = cmd.replace('\\*', '') cmd = cmd.replace('*\\', '') for command in cmd.split(';'): if not command: continue head, line = command.split(':') if head == 'before': result = f'{line};\n{result}' if head == 'after': result = f'{result};\n{line}' if head == 'pre-before': result += f'\\*before:{line}*\\' if head == 'late-after': result += f'\\*after:{line}*\\' result += ';\n' return result def code_block(statements): result = '' for statement in statements: statement_str = statement.__repr__() if len(statement_str) < 2: result += statement_str + ';\n' continue cmd = [] while '\\*' in statement_str: tmpi = statement_str.find('\\*') tmp = statement_str[tmpi:statement_str.find('*\\', tmpi)+2] statement_str = statement_str.replace(tmp, '') cmd.append(tmp) result += CodeBlock.commandize(cmd, statement_str) return result class CIfNode: def __init__(self, if_value, elif_values=[], else_value=None): self.if_value = if_value self.elif_values = elif_values self.else_value = else_value def 
__repr__(self): result = f'if ({self.if_value[0]}){{\n' result += CodeBlock.code_block(self.if_value[1]) result += '}' for elif_value in self.elif_values: result += f'else if ({elif_value[0]}){{\n' result += CodeBlock.code_block(elif_value[1]) result += '}' if self.else_value: result += 'else{\n' result += CodeBlock.code_block(self.else_value) result += '}' return result class CForNode: def __init__(self, identifier_name, var_type, start_value, end_value, step_value, statements): self.identifier_name = identifier_name self.var_type = var_type self.start_value = start_value self.end_value = end_value self.step_value = step_value self.statements = statements def __repr__(self): result = 'for (' result += f'{self.var_type} {self.identifier_name}_i = {self.start_value}; ' result += f'{self.identifier_name}_i < {self.end_value}; ' if self.step_value == 1: result += f'{self.identifier_name}_i++' else: result += f'{self.identifier_name}_i+={self.step_value}' result += '){\n' result += CodeBlock.code_block(self.statements) result += '}' return result class CWhileNode: def __init__(self, condition, statements): self.condition = condition self.statements = statements def __repr__(self): result = f'while ({self.condition}){{\n' result += CodeBlock.code_block(self.statements) result += '}' return result class CStatementNode: def __init__(self, node): self.node = node def __repr__(self): return f'{self.node}' class GlobalVariableNode: def __init__(self, name, type_): self.name = name self.type = type_ def __repr__(self): return f'{self.type} {self.name};\n' class CCodeNode: def __init__(self, libraries, identifier_count, global_variables, statement_nodes): self.libraries = libraries self.identifier_count = identifier_count self.global_variables = global_variables self.statement_nodes = statement_nodes def commandize(self, cmds, statement_str): result = statement_str for cmd in cmds: cmd = cmd.replace('\\*', '') cmd = cmd.replace('*\\', '') for command in cmd.split(';'): if not command: continue head, line = command.split(':') if head == 'before': result = f'{line};\n{result}' if head == 'after': result = f'{result};\n{line}' result += ';\n' return result def __repr__(self): result = '' for library in self.libraries: result += f'{library}' if self.identifier_count: result += f'\nvoid* identifiers[{self.identifier_count}];\n' for variable in self.global_variables: if variable.usage_count: result += f'{variable.type} {variable.name}_;\n' result += 'int main(){\n' for statement in self.statement_nodes: statement_str = statement.__repr__() if len(statement_str) < 2: result += statement_str + ';\n' continue cmd = [] while '\\*' in statement_str: tmpi = statement_str.find('\\*') tmp = statement_str[tmpi:statement_str.find('*\\', tmpi)+2] statement_str = statement_str.replace(tmp, '') cmd.append(tmp) result += self.commandize(cmd, statement_str) result += 'return 0;\n}' return result ### AnalizeResult ### class AnalizeResult: def __init__(self): self.value = None self.error = None def register(self, res): if res.error: self.error = res.error return res.value def success(self, value): self.value = value return self def failure(self, error): self.error = error return self class Symbol: def __init__(self, name, type_): self.name = name self.type = type_ self.assign_count = 0 self.usage_count = 0 self.manual_static = False self.if_thingy = 0 self.is_branching = False self.type_choice = None class Identifier(Symbol): def __init__(self, name, type_, identifier, start_assign_count=0, start_usage_count=0, 
type_choice=None, if_thingy=0, is_branching=False): super().__init__(name, type_) self.identifier = identifier self.first_type = self.type self.assign_count = start_assign_count self.usage_count = start_usage_count self.if_thingy = if_thingy self.is_branching = is_branching self.type_choice = type_choice class Variable(Symbol): def __init__(self, name, type_, manual_static): super().__init__(name, type_) self.manual_static = manual_static def convert_to_identifier(self, identifier): if self.manual_static: return return Identifier(self.name, self.type, identifier, self.assign_count, self.usage_count, self.type_choice, self.if_thingy, self.is_branching) ### SymbolTable ### class SymbolTable: BRANCH_IF = 0 BRANCH_ELSE = 1 BRANCH_WHILE = 2 BRANCH_FOR = 3 def __init__(self): self.branchs = [] self.branch_count = 0 self.symbols = {} self.identifier_count = 0 self.global_variables = [] self.for_refrence = None def symbol_get(self, name, node): res = AnalizeResult() if self.for_refrence is not None: if name in self.for_refrence: return res.success(SymbolNode(for_var=True, name=name, type_=self.for_refrence[1])) if name in self.symbols.keys(): return res.success(SymbolNode(self.symbols, name, self.branchs)) return res.failure( NameError_(node.pos_start, node.pos_end, f"Name '{name}' is not defined") ) def symbol_assign(self, name, node, value_node, manual_static, libraries): res = AnalizeResult() if name in self.symbols.keys(): symbol = self.symbols[name] if isinstance(symbol, Identifier) and manual_static: return res.failure( TypeError_(node.manual_static_token.pos_start, node.manual_static_token.pos_end, "Can't make a dynamic variable static" )) if value_node.type == symbol.type and not self.branchs: if manual_static: symbol.manual_static = True symbol.assign_count += 1 return res.success(SymbolAssignNode(self.symbols, name, value_node)) if symbol.manual_static or manual_static: return res.failure( TypeError_(node.pos_start, node.pos_end, f"Can't change static variables's type" )) if isinstance(symbol, Variable) and value_node.type != symbol.type: self.global_variables.remove(symbol) symbol = symbol.convert_to_identifier(self.identifier_count) self.symbols[name] = symbol self.identifier_count += 1 libraries.add('#include<stdlib.h>\n') if self.branchs: if self.branchs[-1] in (SymbolTable.BRANCH_IF, SymbolTable.BRANCH_FOR, SymbolTable.BRANCH_WHILE) : if value_node.type != symbol.type: if isinstance(symbol.type, list): if value_node.type not in symbol.type: symbol.type.append(value_node.type) if value_node.type == symbol.type[0]: symbol.is_branching = True else: symbol.if_thingy = symbol.type symbol.type = [symbol.type, value_node.type] else: symbol.is_branching = True symbol.if_thingy = symbol.type elif self.branchs[-1] == SymbolTable.BRANCH_ELSE: if isinstance(symbol.type, list): if value_node.type != symbol.if_thingy: if value_node.type not in symbol.type: symbol.type[0] = value_node.type elif not symbol.is_branching: symbol.type.remove(symbol.type[0]) else: symbol.type = [symbol.type, value_node.type] else: symbol.type = value_node.type if len(set(symbol.type)) == 1: symbol.type = symbol.type[0] symbol.assign_count += 1 return res.success(SymbolAssignNode(self.symbols, name, value_node, type_change=True)) if self.branchs: if self.branchs[-1] == SymbolTable.BRANCH_WHILE or self.branchs[-1] == SymbolTable.BRANCH_FOR: return res.failure(NameError_( node.pos_start, node.pos_end, f"Name '{name}' is not defined, hint: can't initiate identifiers inside a loop" )) print(CompileTimeWarnning( 
node.pos_start, node.pos_end, f"Should not initiate identifiers inside a branch, could cause None type identifiers" ).as_string()) symbol = Identifier(name, None, self.identifier_count) self.symbols[name] = symbol self.identifier_count += 1 libraries.add('#include<stdlib.h>\n') if self.branchs[-1] == SymbolTable.BRANCH_IF: symbol.type = [symbol.type, value_node.type] symbol.if_thingy == self.branch_count else: symbol.type = [symbol.type, value_node.type] symbol.assign_count += 1 if value_node.type == 'str': libraries.add('#include<stdlib.h>\n') libraries.add('#include<string.h>\n') return res.success(SymbolAssignNode(self.symbols, name, value_node, branch_init=True)) if value_node.type == 'str': libraries.add('#include<stdlib.h>\n') libraries.add('#include<string.h>\n') symbol = Identifier(name, value_node.type, self.identifier_count) self.identifier_count += 1 else: symbol = Variable(name, value_node.type, manual_static) self.global_variables.append(symbol) self.symbols[name] = symbol symbol.assign_count += 1 return res.success(SymbolAssignNode(self.symbols, name, value_node)) def symbol_type_choice(self, name, type_, node=None): type_map = {'int':'int', 'bool':'_Bool', 'float':'double'} res = AnalizeResult() if node is not None: if name not in self.symbols.keys(): return res.failure( NameError_(node.pos_start, node.pos_end, f"Name '{name}' is not defined") ) symbol = self.symbols[name] if not isinstance(symbol.type, list): return res.failure( TypeError_(node.pos_start, node.pos_end, "Can't type choice on single branch variables") ) if type_map[type_.value] not in symbol.type: return res.failure( TypeError_(type_.pos_start, type_.pos_end, f"Identifier '{name}' does not have '{type_.value}' type branch") ) self.symbols[name].type_choice = type_map[type_.value] return res.success(SymbolNode(self.symbols, name, self.branchs)) self.symbols[name].type_choice = type_ return res.success(SymbolNode(self.symbols, name, self.branchs)) def symbol_type_choice_end(self, name): self.symbols[name].type_choice = None def start_if_branch(self): self.branch_count += 1 self.branchs.append(SymbolTable.BRANCH_IF) def start_elif_branch(self): self.branchs.append(SymbolTable.BRANCH_IF) def start_else_branch(self): self.branchs.append(SymbolTable.BRANCH_ELSE) def start_while_branch(self): self.branchs.append(SymbolTable.BRANCH_WHILE) def start_for_branch(self, identifier_name, type_): self.for_refrence = identifier_name, type_ self.branchs.append(SymbolTable.BRANCH_FOR) def end_branch(self): self.for_refrence = None self.branchs.pop() ### Analizer ### class Analizer: def __init__(self): self.libraries = set() def visit(self, node): method_name = f'visit_{type(node).__name__}' method = getattr(self, method_name, self.no_visit_node) return method(node) def no_visit_node(self, node): raise Exception(f'No visit_{type(node).__name__} method defined.') def visit_NumberNode(self, node): res = AnalizeResult() if type(node.token.value) is int: return res.success(IntNode(node.token.value)) if type(node.token.value) is float: return res.success(DoubleNode(node.token.value)) def visit_StringNode(self, node): return AnalizeResult().success(CStringNode(node.token.value)) def visit_KeywordValueNode(self, node): res = AnalizeResult() if node.token.value == 'True': return res.success(TrueNode()) if node.token.value == 'False': return res.success(FalseNode()) def visit_IdentifierNode(self, node): return self.symbol_table.symbol_get(node.id_name_token.value, node) def visit_IdAssignNode(self, node): res = AnalizeResult() 
value_node = res.register(self.visit(node.value_node)) if res.error: return res if isinstance(value_node.type, list): if_expr = [] elif_expr = [] if None in value_node.type: value_node.type.remove(None) for i, value_type in enumerate(value_node.type): if not i: self.symbol_table.start_if_branch() symbol = res.register(self.symbol_table.symbol_get(value_node.name, node)) if res.error: return res if_expr.append( EqualNode( f'{symbol.name}_type', IntNode(symbol.type.index(value_type)))) res.register(self.symbol_table.symbol_type_choice(value_node.name, value_type)) if_expr.append([res.register(self.symbol_table.symbol_assign( node.id_name_token.value, node, res.register(self.visit(node.value_node)), node.manual_static, self.libraries))]) self.symbol_table.end_branch() else: tmpelif_expr = [] self.symbol_table.start_elif_branch() symbol = res.register(self.symbol_table.symbol_get(value_node.name, node)) if res.error: return res tmpelif_expr.append( EqualNode( f'{symbol.name}_type', IntNode(symbol.type.index(value_type)))) res.register(self.symbol_table.symbol_type_choice(value_node.name, value_type)) tmpelif_expr.append([res.register(self.symbol_table.symbol_assign( node.id_name_token.value, node, res.register(self.visit(node.value_node)), node.manual_static, self.libraries))]) elif_expr.append(tmpelif_expr) self.symbol_table.end_branch() if res.error: return res self.symbol_table.symbol_type_choice_end(value_node.name) return res.success(CIfNode(if_expr, elif_expr)) name = node.id_name_token.value answer = self.symbol_table.symbol_assign(name, node, value_node, node.manual_static, self.libraries) return answer def visit_IfNode(self, node): res = AnalizeResult() if_value_node = [] self.symbol_table.start_if_branch() if_value_node.append(res.register(self.visit(node.if_value[0]))) if res.error: return res if_statement_nodes = [] for statement in node.if_value[1]: if_statement_nodes.append(res.register(self.visit(statement))) if res.error: return res if_value_node.append(if_statement_nodes) self.symbol_table.end_branch() elif_value_nodes = [] for elif_value in node.elif_values: self.symbol_table.start_elif_branch() elif_expr, elif_statements = elif_value elif_value_node = [] elif_value_node.append(res.register(self.visit(elif_expr))) if res.error: return res elif_statement_nodes = [] for statement in elif_statements: elif_statement_nodes.append(res.register(self.visit(statement))) if res.error: return res elif_value_node.append(elif_statement_nodes) elif_value_nodes.append(elif_value_node) self.symbol_table.end_branch() else_value_node = [] if node.else_value is not None: self.symbol_table.start_else_branch() for statement in node.else_value: else_value_node.append(res.register(self.visit(statement))) if res.error: return res self.symbol_table.end_branch() return res.success(CIfNode(if_value_node, elif_value_nodes, else_value_node)) def visit_TypeChoiceNode(self, node): map_type = {'int':'int', 'bool':'_Bool', 'float':'double'} res = AnalizeResult() symbol = res.register(self.symbol_table.symbol_get(node.identifier.value, node)) if res.error: return res if_expr = [] res.register(self.symbol_table.symbol_type_choice(node.identifier.value, node.type, node)) if res.error: return res if_expr.append(EqualNode( f'{symbol.name}_type', IntNode(symbol.type.index(map_type[node.type.value])) )) self.symbol_table.start_if_branch() expr = res.register(self.visit(node.expr)) if res.error: return res if_expr.append([expr]) self.symbol_table.symbol_type_choice_end(node.identifier.value) 
        self.symbol_table.end_branch()
        return res.success(CIfNode(if_expr))

    def visit_ForNode(self, node):
        res = AnalizeResult()
        identifier_name = node.var_name_token.value
        start_value = res.register(self.visit(node.start_value_node))
        if res.error: return res
        end_value = res.register(self.visit(node.end_value_node))
        if res.error: return res
        step_value = res.register(self.visit(node.step_value_node)) if node.step_value_node else 1
        if res.error: return res
        # Compare the analyzed nodes' types (not the nodes themselves), and use
        # the C type name 'double' to stay consistent with the rest of the emitter
        var_type = 'int' if start_value.type == 'int' and end_value.type == 'int' else 'double'
        self.symbol_table.start_for_branch(identifier_name, var_type)
        statements = []
        for statement in node.statements:
            statements.append(res.register(self.visit(statement)))
            if res.error: return res
        self.symbol_table.end_branch()
        return res.success(CForNode(
            identifier_name,
            var_type,
            start_value,
            end_value,
            step_value,
            statements
        ))

    def visit_WhileNode(self, node):
        res = AnalizeResult()
        condition = res.register(self.visit(node.condition_node))
        if res.error: return res
        self.symbol_table.start_while_branch()
        statements = []
        for statement in node.statements:
            statements.append(res.register(self.visit(statement)))
            if res.error: return res
        self.symbol_table.end_branch()
        return res.success(CWhileNode(
            condition,
            statements
        ))

    def visit_BinOpNode(self, node):
        res = AnalizeResult()
        left_node = res.register(self.visit(node.left_node))
        if res.error: return res
        right_node = res.register(self.visit(node.right_node))
        if res.error: return res
        if node.op_token.type == TT_PLUS:
            return res.success(AddNode(left_node, right_node))
        if node.op_token.type == TT_MINUS:
            return res.success(SubtractNode(left_node, right_node))
        if node.op_token.type == TT_MUL:
            return res.success(MultiplyNode(left_node, right_node))
        if node.op_token.type == TT_DIV:
            return res.success(DivideNode(left_node, right_node))
        if node.op_token.type == TT_POW:
            type_ = 'double' if left_node.type == 'double' or right_node.type == 'double' else 'int'
            self.libraries.add('#include<math.h>\n')
            return res.success(FunctionCallNode('pow', type_, (left_node, right_node)))
        if node.op_token.type == TT_EE:
            return res.success(EqualNode(left_node, right_node))
        if node.op_token.type == TT_NE:
            return res.success(NotEqualNode(left_node, right_node))
        if node.op_token.type == TT_LT:
            return res.success(LessThanNode(left_node, right_node))
        if node.op_token.type == TT_GT:
            return res.success(GreaterThanNode(left_node, right_node))
        if node.op_token.type == TT_LTE:
            return res.success(LessThanEqualNode(left_node, right_node))
        if node.op_token.type == TT_GTE:
            return res.success(GreaterThanEqualNode(left_node, right_node))
        if node.op_token.type == TT_KEYWORD and node.op_token.value == 'and':
            return res.success(AndNode(left_node, right_node))
        if node.op_token.type == TT_KEYWORD and node.op_token.value == 'or':
            return res.success(OrNode(left_node, right_node))

    def visit_UnaryOpNode(self, node):
        res = AnalizeResult()
        value_node = res.register(self.visit(node.node))
        if res.error: return res
        if node.op_token.type == TT_PLUS:
            return res.success(value_node)
        if node.op_token.type == TT_MINUS:
            return res.success(NegateNode(value_node))
        if node.op_token.type == TT_KEYWORD and node.op_token.value == 'not':
            return res.success(NotNode(value_node))

    def visit_StatementNode(self, node):
        res = AnalizeResult()
        statement_node = res.register(self.visit(node.statement_node))
        if res.error: return res
        return res.success(CStatementNode(statement_node))

    def visit_CodeNode(self, node):
        self.symbol_table = SymbolTable()
        res = AnalizeResult()
        statement_nodes = []
        for statement in node.statements:
            statement_nodes.append(res.register(self.visit(statement)))
            if res.error: return res
        return res.success(CCodeNode(self.libraries, self.symbol_table.identifier_count, self.symbol_table.global_variables, statement_nodes))


### RUN ###

def lex(file_name, context):
    # Lexing
    lexer = Lexer(file_name, context)
    return lexer.make_tokens()

def parse(tokens):
    # Parsing
    parser = Parser(tokens)
    return parser.parser()

def analize(abstractSyntaxTree):
    # Analyzing
    analizer = Analizer()
    return analizer.visit(abstractSyntaxTree.node)

def run(file_name, context):
    tokens, error = lex(file_name, context)
    if error: return None, error
    ast = parse(tokens)
    if ast.error: return None, ast.error
    c = analize(ast)
    if c.error: return None, c.error
    return c.value, None

def open_code(file_name):
    try:
        with open(file_name, 'r') as f:
            context = f.read()
            return context
    except FileNotFoundError as exception:
        print(exception)
        sys.exit()

def main():
    if len(sys.argv) < 2:
        print("""Please provide one of the following arguments:
compile <file_name>
run <file_name>
c <file_name>
lex <file_name>
parse <file_name>
""")
        sys.exit()
    if len(sys.argv) < 3:
        print("Please also provide the file path of the script :)")
        sys.exit()

    cmd = sys.argv[1].lower()
    file_name = sys.argv[2]
    context = open_code(file_name)

    if cmd == 'compile':
        result, error = run(file_name, context)
        if error:
            print(error.as_string())
        else:
            xfile = file_name.rsplit('.', 1)[0]
            with open(xfile + '.c', 'w') as f:
                f.write(repr(result))
            try:
                # Run gcc with an argument list and wait for it to finish
                subprocess.run(["gcc", "-O2", xfile + ".c", "-o", xfile + ".exe"])
            except FileNotFoundError:
                print("gcc is not installed, you need the gcc compiler to use this program ;(")
    elif cmd == 'run':
        result, error = run(file_name, context)
        if error:
            print(error.as_string())
        else:
            xfile = file_name.rsplit('.', 1)[0]
            with open(xfile + '.c', 'w') as f:
                f.write(repr(result))
            try:
                # Compile synchronously first, then execute the produced binary;
                # two overlapping Popen calls would race each other
                compile_proc = subprocess.run(["gcc", "-O2", xfile + ".c", "-o", xfile + ".exe"])
                if compile_proc.returncode == 0:
                    subprocess.run(["./" + xfile + ".exe"])
            except FileNotFoundError:
                print("gcc is not installed, you need the gcc compiler to use this program ;(")
    elif cmd == 'c':
        result, error = run(file_name, context)
        if error:
            print(error.as_string())
        else:
            print(result)
    elif cmd == 'lex':
        result, error = lex(file_name, context)
        if error:
            print(error.as_string())
        else:
            print(result)
    elif cmd == 'parse':
        tokens, error = lex(file_name, context)
        if error:
            print(error.as_string())
        else:
            ast = parse(tokens)
            if ast.error:
                print(ast.error.as_string())
            else:
                print(ast.node)
    else:
        print(f"""Invalid argument {cmd} :(
Please provide one of the valid arguments:
compile <file_name>
run <file_name>
c <file_name>
lex <file_name>
parse <file_name>
""")
        sys.exit()

if __name__ == "__main__":
    main()
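# ---------------------------------------------------------------------------
# A minimal sketch of the \*...*\ directive protocol consumed by
# CodeBlock.commandize above: 'before:' lines are hoisted above a statement
# and 'after:' lines are appended below it. The module name 'minilang' is an
# assumption for illustration; adjust it to wherever this file is saved.
# ---------------------------------------------------------------------------
from minilang import CodeBlock

# One malloc directive attached to an assignment statement; commandize strips
# the \* *\ markers and emits the 'before' line ahead of the statement.
directives = ['\\*before:identifiers[0] = malloc(sizeof(int))*\\']
print(CodeBlock.commandize(directives, '*(int*)identifiers[0] = 5'))
# Expected output (two C statements, the malloc hoisted above the store):
# identifiers[0] = malloc(sizeof(int));
# *(int*)identifiers[0] = 5;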
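# ---------------------------------------------------------------------------
# End-to-end usage sketch for the pipeline above (lex -> parse -> analize).
# Assumes the compiler is saved as minilang.py; the tiny source program and
# the "<demo>" file name are illustrative and only use constructs defined by
# this grammar.
# ---------------------------------------------------------------------------
from minilang import run

source = "x = 1 + 2\ny = x * 3"
c_code, error = run("<demo>", source)
if error:
    print(error.as_string())
else:
    # repr() of the returned CCodeNode is the generated C translation unit
    print(c_code)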
import threading from datetime import datetime from typing import Callable, Dict, Set, List from dataclasses import dataclass import zmq import zmq.auth from zmq.backend.cython.constants import NOBLOCK from tzlocal import get_localzone import pytz from vnpy.trader.constant import ( Direction, Exchange, OrderType, Product, Status, Interval ) from vnpy.trader.gateway import BaseGateway from vnpy.trader.object import ( TickData, OrderData, TradeData, PositionData, AccountData, ContractData, OrderRequest, CancelRequest, SubscribeRequest, HistoryRequest, BarData ) PERIOD_M1 = 1 PERIOD_H1 = 16385 PERIOD_D1 = 16408 FUNCTION_QUERYCONTRACT = 0 FUNCTION_QUERYORDER = 1 FUNCTION_QUERYHISTORY = 2 FUNCTION_SUBSCRIBE = 3 FUNCTION_SENDORDER = 4 FUNCTION_CANCELORDER = 5 ORDER_STATE_STARTED = 0 ORDER_STATE_PLACED = 1 ORDER_STATE_CANCELED = 2 ORDER_STATE_PARTIAL = 3 ORDER_STATE_FILLED = 4 ORDER_STATE_REJECTED = 5 POSITION_TYPE_BUY = 0 POSITION_TYPE_SELL = 1 TRADE_TRANSACTION_ORDER_ADD = 0 TRADE_TRANSACTION_ORDER_UPDATE = 1 TRADE_TRANSACTION_ORDER_DELETE = 2 TRADE_TRANSACTION_HISTORY_ADD = 6 TRADE_TRANSACTION_REQUEST = 10 TRADE_RETCODE_MARKET_CLOSED = 10018 TYPE_BUY = 0 TYPE_SELL = 1 TYPE_BUY_LIMIT = 2 TYPE_SELL_LIMIT = 3 TYPE_BUY_STOP = 4 TYPE_SELL_STOP = 5 INTERVAL_VT2MT = { Interval.MINUTE: PERIOD_M1, Interval.HOUR: PERIOD_H1, Interval.DAILY: PERIOD_D1, } STATUS_MT2VT = { ORDER_STATE_STARTED: Status.SUBMITTING, ORDER_STATE_PLACED: Status.NOTTRADED, ORDER_STATE_CANCELED: Status.CANCELLED, ORDER_STATE_PARTIAL: Status.PARTTRADED, ORDER_STATE_FILLED: Status.ALLTRADED, ORDER_STATE_REJECTED: Status.REJECTED } ORDERTYPE_MT2VT = { TYPE_BUY: (Direction.LONG, OrderType.MARKET), TYPE_SELL: (Direction.SHORT, OrderType.MARKET), TYPE_BUY_LIMIT: (Direction.LONG, OrderType.LIMIT), TYPE_SELL_LIMIT: (Direction.SHORT, OrderType.LIMIT), TYPE_BUY_STOP: (Direction.LONG, OrderType.STOP), TYPE_SELL_STOP: (Direction.SHORT, OrderType.STOP), } ORDERTYPE_VT2MT = {v: k for k, v in ORDERTYPE_MT2VT.items()} LOCAL_TZ = get_localzone() class Mt5Gateway(BaseGateway): """ VN Trader Gateway for MT5. 
""" default_setting: Dict[str, str] = { "通讯地址": "localhost", "请求端口": "6888", "订阅端口": "8666", } exchanges: List[Exchange] = [Exchange.OTC] def __init__(self, event_engine): """Constructor""" super().__init__(event_engine, "MT5") self.callbacks: Dict[str, Callable] = { "account": self.on_account_info, "price": self.on_price_info, "order": self.on_order_info, "position": self.on_position_info } self.client = Mt5Client(self) self.order_count = 0 self.local_sys_map: Dict[str, str] = {} self.sys_local_map: Dict[str, str] = {} self.position_symbols: Set[str] = set() self.orders: Dict[str, OrderData] = {} def connect(self, setting: dict) -> None: """""" address = setting["通讯地址"] req_port = setting["请求端口"] sub_port = setting["订阅端口"] req_address = f"tcp://{address}:{req_port}" sub_address = f"tcp://{address}:{sub_port}" self.client.start(req_address, sub_address) self.query_contract() self.query_order() def subscribe(self, req: SubscribeRequest) -> None: """""" mt5_req = { "type": FUNCTION_SUBSCRIBE, "symbol": req.symbol.replace('-', '.') } self.client.send_request(mt5_req) def send_order(self, req: OrderRequest) -> str: """""" cmd = ORDERTYPE_VT2MT.get((req.direction, req.type), None) if req.type == OrderType.FOK or req.type == OrderType.FAK or req.type == OrderType.RFQ: self.write_log(f"不支持的委托类型:{req.type.value}") return "" local_id = self.new_orderid() mt5_req = { "type": FUNCTION_SENDORDER, "symbol": req.symbol.replace('-', '.'), "cmd": cmd, "price": req.price, "volume": req.volume, "comment": local_id, } packet = self.client.send_request(mt5_req) result = packet["data"]["result"] comment = packet["data"]["comment"] order = req.create_order_data(local_id, self.gateway_name) if result: order.status = Status.SUBMITTING else: order.status = Status.REJECTED self.write_log(f"委托{local_id}拒单,原因{comment}") self.on_order(order) self.orders[local_id] = order return order.vt_orderid def new_orderid(self) -> int: """""" prefix = datetime.now().strftime("%Y%m%d_%H%M%S_") self.order_count += 1 suffix = str(self.order_count).rjust(4, "0") orderid = prefix + suffix return orderid def cancel_order(self, req: CancelRequest) -> None: """""" if req.orderid not in self.local_sys_map: self.write_log(f"委托撤单失败,找不到{req.orderid}对应的系统委托号") return sys_id = self.local_sys_map[req.orderid] mt5_req = { "type": FUNCTION_CANCELORDER, "ticket": int(sys_id) } packet = self.client.send_request(mt5_req) result = packet["data"]["result"] if result is True: self.write_log(f"委托撤单成功{req.orderid}") elif result is False: self.write_log(f"委托撤单失败{req.orderid}") def query_contract(self) -> None: """""" mt5_req = {"type": FUNCTION_QUERYCONTRACT} packet = self.client.send_request(mt5_req) if packet: self.write_log("MT5连接成功") for d in packet["data"]: contract = ContractData( symbol=d["symbol"].replace('.', '-'), exchange=Exchange.OTC, name=d["symbol"].replace('.', '-'), product=Product.FOREX, size=d["lot_size"], pricetick=pow(10, -d["digits"]), min_volume=d["min_lot"], net_position=True, stop_supported=True, history_data=True, gateway_name=self.gateway_name, ) self.on_contract(contract) self.write_log("合约信息查询成功") def query_order(self) -> None: """""" mt5_req = {"type": FUNCTION_QUERYORDER} packet = self.client.send_request(mt5_req) for d in packet.get("data", []): direction, order_type = ORDERTYPE_MT2VT[d["order_type"]] sys_id = str(d["order"]) if d["order_comment"]: local_id = d["order_comment"] else: local_id = sys_id self.local_sys_map[local_id] = sys_id self.sys_local_map[sys_id] = local_id order = OrderData( 
symbol=d["symbol"].replace('.', '-'), exchange=Exchange.OTC, orderid=local_id, direction=direction, type=order_type, price=d["order_price"], volume=d["order_volume_initial"], traded=d["order_volume_initial"] - d["order_volume_current"], status=STATUS_MT2VT.get(d["order_state"], Status.SUBMITTING), datetime=generate_datetime(d["order_time_setup"]), gateway_name=self.gateway_name ) self.orders[local_id] = order self.on_order(order) self.write_log("委托信息查询成功") def query_account(self) -> None: """""" pass def query_position(self) -> None: """""" pass def query_history(self, req: HistoryRequest) -> List[BarData]: """ Query bar history data. """ history = [] start_time = generate_datetime3(req.start) end_time = generate_datetime3(req.end) mt5_req = { "type": FUNCTION_QUERYHISTORY, "symbol": req.symbol.replace('-', '.'), "interval": INTERVAL_VT2MT[req.interval], "start_time": start_time, "end_time": end_time, } packet = self.client.send_request(mt5_req) if packet["result"] == -1: self.write_log("获取历史数据失败") else: for d in packet["data"]: bar = BarData( symbol=req.symbol.replace('.', '-'), exchange=Exchange.OTC, datetime=generate_datetime2(d["time"]), interval=req.interval, volume=d["real_volume"], open_price=d["open"], high_price=d["high"], low_price=d["low"], close_price=d["close"], gateway_name=self.gateway_name ) history.append(bar) data = packet["data"] begin = generate_datetime2(data[0]["time"]) end = generate_datetime2(data[-1]["time"]) msg = f"获取历史数据成功,{req.symbol.replace(".","-")} - {req.interval.value},{begin} - {end}" self.write_log(msg) return history def close(self) -> None: """""" self.client.stop() self.client.join() def callback(self, packet: dict) -> None: """""" type_ = packet["type"] callback_func = self.callbacks.get(type_, None) if callback_func: callback_func(packet) def on_order_info(self, packet: dict) -> None: """""" data = packet["data"] if not data["order"]: if data["trans_type"] == TRADE_TRANSACTION_REQUEST: local_id = data["request_comment"] order = self.orders.get(local_id, None) if local_id and order: order_id = str(data["result_order"]) if data["result_order"] and self.sys_local_map[order_id] == order_id: order.orderid = local_id order.traded = data["result_volume"] if order.traded == order.volume: order.status = Status.ALLTRADED else: order.status = Status.PARTTRADED self.on_order(order) trade = TradeData( symbol=order.symbol, exchange=order.exchange, direction=order.direction, orderid=data["request_comment"], tradeid=data["result_deal"], price=data["result_price"], volume=data["result_volume"], datetime=LOCAL_TZ.localize(datetime.now()), gateway_name=self.gateway_name ) self.on_trade(trade) elif data["result_retcode"] == TRADE_RETCODE_MARKET_CLOSED: order.status = Status.REJECTED self.write_log(f"委托{local_id}拒单,原因market_closed") self.on_order(order) return trans_type = data["trans_type"] # Map sys and local orderid if trans_type == TRADE_TRANSACTION_ORDER_ADD: sys_id = str(data["order"]) local_id = data["order_comment"] if not local_id: local_id = sys_id self.local_sys_map[local_id] = sys_id self.sys_local_map[sys_id] = local_id order = self.orders.get(local_id, None) if local_id and order: order.datetime = generate_datetime(data["order_time_setup"]) # Update order data elif trans_type in {TRADE_TRANSACTION_ORDER_UPDATE, TRADE_TRANSACTION_ORDER_DELETE}: sysid = str(data["order"]) local_id = self.sys_local_map[sysid] order = self.orders.get(local_id, None) if not order: direction, order_type = ORDERTYPE_MT2VT[data["order_type"]] order = OrderData( 
symbol=data["symbol"].replace('.', '-'), exchange=Exchange.OTC, orderid=local_id, type=order_type, direction=direction, price=data["order_price"], volume=data["order_volume_initial"], gateway_name=self.gateway_name ) self.orders[local_id] = order if data["order_time_setup"]: order.datetime = generate_datetime(data["order_time_setup"]) if data["trans_state"] in STATUS_MT2VT: order.status = STATUS_MT2VT[data["trans_state"]] self.on_order(order) # Update trade data elif trans_type == TRADE_TRANSACTION_HISTORY_ADD: sysid = str(data["order"]) local_id = self.sys_local_map[sysid] order = self.orders.get(local_id, None) if order: if data["order_time_setup"]: order.datetime = generate_datetime(data["order_time_setup"]) trade = TradeData( symbol=order.symbol.replace('.', '-'), exchange=order.exchange, direction=order.direction, orderid=order.orderid, tradeid=data["deal"], price=data["trans_price"], volume=data["trans_volume"], datetime=LOCAL_TZ.localize(datetime.now()), gateway_name=self.gateway_name ) order.traded = trade.volume self.on_order(order) self.on_trade(trade) def on_account_info(self, packet: dict) -> None: """""" data = packet["data"] account = AccountData( accountid=data["name"], balance=data["balance"], frozen=data["margin"], gateway_name=self.gateway_name ) self.on_account(account) def on_position_info(self, packet: dict) -> None: """""" positions = {} data = packet.get("data", []) for d in data: position = PositionData( symbol=d["symbol"].replace('.', '-'), exchange=Exchange.OTC, direction=Direction.NET, gateway_name=self.gateway_name ) if d["type"] == POSITION_TYPE_BUY: position.volume = d["volume"] else: position.volume = -d["volume"] position.price = d["price"] position.pnl = d["current_profit"] positions[position.symbol] = position for symbol in self.position_symbols: if symbol not in positions: position = PositionData( symbol=symbol, exchange=Exchange.OTC, direction=Direction.NET, gateway_name=self.gateway_name ) positions[symbol] = position for position in positions.values(): self.position_symbols.add(position.symbol) self.on_position(position) def on_price_info(self, packet: dict) -> None: """""" if "data" not in packet: return for d in packet["data"]: tick = TickData( symbol=d["symbol"].replace('.', '-'), exchange=Exchange.OTC, name=d["symbol"].replace('.', '-'), bid_price_1=d["bid"], ask_price_1=d["ask"], volume=d["last_volume"], datetime=datetime.now(), gateway_name=self.gateway_name ) if tick.last_price: tick.last_price = d["last"] tick.high_price = d["last_high"] tick.low_price = d["last_low"] else: tick.last_price = (d["bid"] + d["ask"]) / 2 tick.high_price = (d["bid_high"] + d["ask_high"]) / 2 tick.low_price = (d["bid_low"] + d["ask_low"]) / 2 self.on_tick(tick) class Mt5Client: """""" def __init__(self, gateway: Mt5Gateway): """Constructor""" self.gateway: Mt5Gateway = gateway self.context: zmq.Context = zmq.Context() self.socket_req: zmq.Socket = self.context.socket(zmq.REQ) self.socket_sub: zmq.Socket = self.context.socket(zmq.SUB) self.socket_sub.setsockopt_string(zmq.SUBSCRIBE, "") self.active: bool = False self.thread: threading.Thread = None self.lock: threading.Lock = threading.Lock() def start(self, req_address: str, sub_address: str) -> None: """ Start RpcClient """ if self.active: return # Connect zmq port self.socket_req.connect(req_address) self.socket_sub.connect(sub_address) # Start RpcClient status self.active = True # Start RpcClient thread self.thread = threading.Thread(target=self.run) self.thread.start() def stop(self) -> None: """ Stop RpcClient 
""" if not self.active: return self.active = False def join(self) -> None: """""" if self.thread and self.thread.is_alive(): self.thread.join() self.thread = None def run(self) -> None: """ Run RpcClient function """ while self.active: if not self.socket_sub.poll(1000): continue data = self.socket_sub.recv_json(flags=NOBLOCK) self.callback(data) # Close socket self.socket_req.close() self.socket_sub.close() def callback(self, data: Dict) -> None: """ Callable function """ self.gateway.callback(data) def send_request(self, req: Dict) -> Dict: """""" if not self.active: return {} self.socket_req.send_json(req) data = self.socket_req.recv_json() return data def generate_datetime(timestamp: int) -> datetime: """""" dt = datetime.fromtimestamp(timestamp) dt = LOCAL_TZ.localize(dt) return dt def generate_datetime2(timestamp: int) -> datetime: """""" dt = datetime.strptime(str(timestamp), "%Y.%m.%d %H:%M") utc_dt = dt.replace(tzinfo=pytz.utc) local_tz = LOCAL_TZ.normalize(utc_dt.astimezone(LOCAL_TZ)) return local_tz def generate_datetime3(datetime: datetime) -> str: """""" utc_tz = pytz.utc.normalize(datetime.astimezone(pytz.utc)) utc_tz = utc_tz.replace(tzinfo=None) dt = utc_tz.isoformat() dt = dt.replace('T', ' ') return dt @dataclass class OrderBuf: symbol: str type: OrderType = OrderType.LIMIT direction: Direction = None price: float = 0 volume: float = 0 traded: float = 0 status: Status = Status.SUBMITTING datetime: datetime = None
import os
import time
import logging
import warnings
import psutil
from signal import signal, SIGINT
from py3nvml.py3nvml import *
from typing import Dict, Optional

from kge import Config, Dataset
from kge.distributed.parameter_server import init_torch_server, init_lapse_scheduler
from kge.distributed.worker_process import WorkerProcessPool
from kge.distributed.work_scheduler import WorkScheduler
from kge.distributed.misc import get_num_keys

import torch
from torch import multiprocessing as mp


def monitor_hardware(folder, interval=1):
    def bytes_to_mb(bytes_amount):
        return round(bytes_amount / 1024 / 1024, 2)

    logger = logging.getLogger("hardware_monitor")
    logger.setLevel(logging.DEBUG)
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(folder, "hardware_monitor.log"))
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    # let's monitor the default connection between OUR two servers
    # todo: just monitor all interfaces later on
    interface = "enp130s0f0"
    while True:
        time.sleep(interval)
        cpu_percentage = psutil.cpu_percent()
        memory_percentage = psutil.virtual_memory().percent
        network_info = psutil.net_io_counters()
        bytes_sent = network_info.bytes_sent
        bytes_recv = network_info.bytes_recv
        # timestamp;cpu%;mem%;net_sent;net_recv
        msg = f"{time.time()};{cpu_percentage};{memory_percentage};{bytes_to_mb(bytes_sent)};{bytes_to_mb(bytes_recv)}"
        network_info = psutil.net_io_counters(pernic=True)
        if interface in network_info.keys():
            bytes_sent = network_info[interface].bytes_sent
            bytes_recv = network_info[interface].bytes_recv
            msg += f";{bytes_to_mb(bytes_sent)};{bytes_to_mb(bytes_recv)}"
        logger.info(msg=msg)


def monitor_gpus(folder, interval=1):
    try:
        nvmlInit()
    except Exception:
        print("could not initialize GPU monitor")
        return
    device_count = nvmlDeviceGetCount()
    if device_count == 0:
        return
    logger = logging.getLogger("gpu_monitor")
    logger.setLevel(logging.DEBUG)
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(folder, "gpu_monitor.log"))
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    while True:
        time.sleep(interval)
        for i in range(device_count):
            handle = nvmlDeviceGetHandleByIndex(i)
            proc_res = nvmlDeviceGetComputeRunningProcesses(handle)
            mem_per_process = list(
                map(lambda obj: (obj.pid, obj.usedGpuMemory), proc_res)
            )
            res = nvmlDeviceGetUtilizationRates(handle)
            mem_res = nvmlDeviceGetMemoryInfo(handle)
            # timestamp;device_id;gpu_util;gpu_mem_util;mem_per_process
            logger.info(
                f"{time.time()};{i};{res.gpu};{round((mem_res.used/mem_res.total)*100)};{mem_per_process}"
            )


def create_and_run_distributed(
    config: Config, dataset: Optional[Dataset] = None, checkpoint: Optional[Dict] = None
):
    # setting num eval workers to 1 if < 1
    if config.get("job.distributed.num_eval_workers") < 1:
        warnings.warn("Need to have at least one worker for evaluation."
                      "Setting job.distributed.num_eval_workers to 1")
        config.set("job.distributed.num_eval_workers", 1)
    # setting num workers to 1 if < 1
    if config.get("job.distributed.num_workers") < 1:
        warnings.warn("Need to have at least one worker for training."
"Setting job.distribtued.num_workers to 1") config.set("job.distributed.num_workers", 1) # setting num workers per machine to num workers if < 0 if config.get("job.distributed.num_workers_machine") <= 0: config.set("job.distributed.num_workers_machine", config.get("job.distributed.num_workers")) # setting already initialized workers if < 0 if config.get("job.distributed.already_init_workers") < 0: config.set("job.distributed.already_init_workers", config.get("job.distributed.machine_id") * config.get("job.distributed.num_workers_machine")) # specific settings for valid only jobs if config.get("job.type") in ["valid", "test", "eval"]: config.set("job.distributed.parameter_server", "shared") num_eval_workers = config.get("job.distributed.num_eval_workers") config.set("job.distributed.num_workers", num_eval_workers) config.set("job.distributed.num_workers_machine", num_eval_workers) config.set("job.distributed.num_machines", 1) config.set("job.distributed.gloo_socket_ifname", "lo") config.set("job.distributed.master_ip", "127.0.0.1") config.set(f"{config.get('model')}.create_eval", True) os.environ["OMP_NUM_THREADS"] = str( config.get("job.distributed.num_threads_per_process") ) os.environ["GLOO_SOCKET_IFNAME"] = config.get("job.distributed.gloo_socket_ifname") if ( config.get("job.distributed.repartition_epoch") and config.get("job.distributed.partition_type") == "stratification" ): # with stratificaton we have a lot of open files that need to be shared # between processes. Some servers don't allow that. Therefore set sharing # strategy to file_system to avoid too many open files error torch.multiprocessing.set_sharing_strategy("file_system") # catch interrupt (to shut down lapse and other processes) processes = [] monitoring_processes = [] worker_process_pool = None def kill_processes(signal_received, frame): print("\nSIGINT or CTRL-C detected. 
Shutting down all processes and exiting...") for process in processes: if process is not None: try: process.kill() except AttributeError: print("process already killed") for process in monitoring_processes: if process is not None: process.kill() if worker_process_pool is not None: worker_process_pool.kill() exit(0) signal(SIGINT, kill_processes) if config.get("job.type") == "train": # start hardware monitoring monitor_process = mp.Process( target=monitor_hardware, args=(config.folder, 0.5), daemon=True ) monitoring_processes.append(monitor_process) monitor_process.start() gpu_monitor_process = mp.Process( target=monitor_gpus, args=(config.folder, 1), daemon=True ) monitoring_processes.append(gpu_monitor_process) gpu_monitor_process.start() if config.get("job.distributed.machine_id") == 0: num_keys = get_num_keys(config, dataset) if config.get("job.distributed.parameter_server") == "lapse": p = mp.Process( target=init_lapse_scheduler, args=( config, num_keys, ), daemon=True, ) processes.append(p) p.start() elif config.get("job.distributed.parameter_server") == "torch": p = mp.Process( target=init_torch_server, args=( config, num_keys, ), daemon=True, ) processes.append(p) p.start() # create a work scheduler print("init scheduler") scheduler_init_time = time.time() scheduler = WorkScheduler.create(config=config, dataset=dataset) config.log(f"scheduler initialized after: {time.time()-scheduler_init_time}") print("start scheduler") scheduler_start_time = time.time() processes.append(scheduler) scheduler.start() config.log(f"scheduler start took: {time.time()-scheduler_start_time}") # create all train-workers in a worker pool worker_process_pool = WorkerProcessPool( config, dataset, checkpoint, ) valid_trace = worker_process_pool.join() for p in processes: p.join() if config.get("job.type") == "train": monitor_process.terminate() gpu_monitor_process.terminate() return valid_trace
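# ----------------------------------------------------------------------------
# Illustrative sketch (separate from the script above): the daemon-process plus
# SIGINT-handler pattern that create_and_run_distributed uses for its hardware
# monitors. The heartbeat worker is a stand-in; no kge internals are assumed.
import time
from signal import signal, SIGINT
from torch import multiprocessing as mp  # the stdlib multiprocessing module behaves the same here

def _heartbeat(interval: float) -> None:
    while True:
        print("alive")
        time.sleep(interval)

if __name__ == "__main__":
    proc = mp.Process(target=_heartbeat, args=(0.5,), daemon=True)
    proc.start()

    def _shutdown(signum, frame):
        # mirrors kill_processes: stop children explicitly before exiting
        proc.kill()
        raise SystemExit(0)

    signal(SIGINT, _shutdown)
    time.sleep(2)      # stand-in for the real work
    proc.terminate()   # normal-path cleanup, like the monitor teardown above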
from migrate_postgres import create_category, create_activity, activate_activity

GUIAS = ["1_conceptos.tex", "13_excepciones.tex", "17_PilasColas.tex", "20_ordenamiento_recursivo.tex",
         "5_ciclos.tex", "9_diccionarios.tex", "10_contratos.tex", "14_objetos.tex",
         "18_Modelo_de_ejecucion.tex", "2_programacion.tex", "6_cadenas.tex", "11_archivos.tex",
         "15_herencia_polimorf.tex", "19_ordenamiento.tex", "3_funciones.tex", "7_tuplas_y_listas.tex",
         "12_procesamiento.tex", "16_Listas.tex", "4_decisiones.tex", "8_busqueda.tex",
         "ejercicios_c.tex"
         ]

GUIAS.sort()

initial_code = '''def main():
    # codigo del alumne

main()
'''

c_initial_code = '''#include <stdio.h>

int main(){
    // codigo del alumne
}
'''


def main():
    for guia in GUIAS:
        if guia != "ejercicios_c.tex":
            num = int(guia.split("_")[0])
            # rstrip(".tex") would strip a *character set*, and split("_")[1] would drop
            # everything after a second underscore, so build the name explicitly instead.
            cat_name = f"{num:02d}_{guia.split('_', 1)[1]}".removesuffix(".tex")
            language = "python"
            code = initial_code
        else:
            cat_name = "21_ejercicios_c"
            language = "c"
            code = c_initial_code
        print("Guia " + guia)
        print(cat_name)
        category = create_category(cat_name)
        cat_id = category['id']
        with open('/home/alepox/Desktop/wachen/algo1/apunte/' + guia) as guia_f:
            for i, ej in enumerate(guia_f.read().split("\\begin{ejercicio}")[1:]):
                print(f"Ejercicio {i + 1}")
                ej = ej.lstrip("\n")
                ej = ej.rstrip("\n")
                # ej = ej.rstrip("\\end{ejercicio}")
                ej = ej[:ej.index("\\end{ejercicio}")]
                print(len(ej))
                my_activity = create_activity(f"Ejercicio {i + 1:02d}", ej, language, cat_id, 10,
                                              initial_code=code)
                activate_activity(my_activity['id'])
    print("SUCCESSSS")


main()
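# ----------------------------------------------------------------------------
# Why cat_name above uses removesuffix rather than rstrip(".tex"): str.rstrip
# treats its argument as a *character set*, not a suffix, so names ending in
# 't', 'e' or 'x' get mangled. A self-contained illustration (removesuffix
# needs Python 3.9+):
assert "note.tex".rstrip(".tex") == "no"              # character-set stripping also eats "te"
assert "note.tex".removesuffix(".tex") == "note"      # removes exactly the suffix
assert "13_excepciones.tex".removesuffix(".tex") == "13_excepciones"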
import re f = lambda x: x.split(' ')[0].upper() + (' ' in x and f" {x.split(' ', 1)[1]}" or '') with open('raw_html_bits') as fd: raw_html = fd.read() al = re.findall(r"^<td axis=\".{6}\|\d+\|\d+/?\d*\|([^\"]+)\">([^<].+)</td>$", raw_html, re.MULTILINE) #al.insert(0xCB, ("BITS", "BITS")) #al.insert(0xDD, ("IX", "IX")) #al.insert(0xED, ("EXTD", "EXTD")) #al.insert(0xFD, ("IV", "IV")) print('\n\n'.join(f"//! {i:02X}; {f(v[1])}: {v[0]}" for i, v in enumerate(al)))
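# ----------------------------------------------------------------------------
# A more explicit equivalent of the lambda f above (illustration only):
# uppercase the first word of a mnemonic and leave the rest untouched. The
# sample strings are hypothetical.
def upper_first_word(s: str) -> str:
    head, sep, tail = s.partition(' ')
    return head.upper() + sep + tail

assert upper_first_word("ld a,b") == "LD a,b"
assert upper_first_word("nop") == "NOP"  # no operand: nothing is appended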
import asyncio import nextcord from nextcord.ext import commands from ..tools.Embeds import embeds import uuid import datetime import os from ..tools.Athena import Athena class moderation(commands.Cog, embeds): LOAD = True NAME = "Moderation" def __init__(self, client: Athena): self.client = client @commands.command("archive") @commands.has_permissions(manage_guild=True) async def create_channel_history_archive(self, ctx, amount: int, channel: nextcord.TextChannel = None): """ Creates a text file with all the messages sent in a channel. :param ctx: :param amount: :param channel: :return: """ async with ctx.channel.typing(): if not channel: channel = ctx.channel embed = nextcord.Embed( title=f"Generating archive of: {channel.name}..." ) message = await ctx.send(embed=embed) await asyncio.sleep(3) self.client.console.info_log(f"Archive of {channel.name} requested; amount = {amount}.") h = await channel.history(limit=amount).flatten() h = list(reversed(h)) message_count = len([m for m in h if m.content]) with open( path := f"./temp/Archive_{datetime.datetime.now().strftime('%A-%B-%d-%Y')} {uuid.uuid4().hex[:4]}.txt", "a+") as archive_file: archive_file.writelines( f"[META] CREATED: {datetime.datetime.now().strftime('%m/%d/%Y, %H:%M:%S')}; MESSAGE COUNT: {message_count}; CHANNEL: {channel.name} [/META]\n" ) for message in h: if message.content: try: archive_file.writelines( f"[{message.created_at.strftime('%m/%d/%Y, %H:%M:%S')}]: {message.author.name}#{message.author.discriminator}: {message.content}\n") except: pass self.client.console.info_log(f"Archive of {channel.name} successfully created.") await ctx.send(file=nextcord.File(path)) embed.title = f"Archive of: {channel.name}" await message.edit(embed=embed) self.client.console.info_log(f"Archive of {channel.name} sent to {ctx.channel.name}") os.remove(path) self.client.console.info_log(f"Archive of {channel.name} temporary file successfully deleted.") @commands.command("athpurge", aliases=['athp']) @commands.has_permissions(administrator=True) async def purge_channel(self, ctx, amount: int = 0): """ deletes param:amount of messages from a channel :param ctx: :param amount: :return: """ try: await ctx.channel.purge(limit=amount) except: pass embed = nextcord.Embed(title=f"Purged [{amount}] messages.", colour=self.SUCCESS) embed.set_footer(text=f"Command issued by {ctx.message.author.name}#{ctx.message.author.discriminator}") await ctx.send(embed=embed)
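# ----------------------------------------------------------------------------
# Illustrative alternative (not wired into the cog above): writing the archive
# through tempfile avoids depending on a pre-existing ./temp directory and on
# manually removing a hand-built path. Only the standard library is assumed;
# the sample line is hypothetical.
import os
import tempfile

lines = ["[01/01/2024, 12:00:00]: user#0001: hello\n"]  # stand-in archive content

fd, path = tempfile.mkstemp(prefix="Archive_", suffix=".txt")
try:
    with os.fdopen(fd, "w") as archive_file:
        archive_file.writelines(lines)
    # ... the file at `path` would be sent with nextcord.File(path) here ...
finally:
    os.remove(path)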
import demistomock as demisto from CommonServerPython import * from CommonServerUserPython import * ''' IMPORTS ''' from datetime import datetime from typing import Dict, List, Any, Optional, Tuple import uuid import json import requests # disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBALS ''' if not demisto.params().get('port'): return_error('Set a port for the instance') URL = demisto.params()['server'].rstrip('/:') + ':' + demisto.params().get('port') + '/api/' API_KEY = str(demisto.params().get('key')) USE_SSL = not demisto.params().get('insecure') # determine a vsys or a device-group VSYS = demisto.params().get('vsys') if demisto.args() and demisto.args().get('device-group', None): DEVICE_GROUP = demisto.args().get('device-group') else: DEVICE_GROUP = demisto.params().get('device_group', None) # configuration check if DEVICE_GROUP and VSYS: return_error('Cannot configure both vsys and Device group. Set vsys for firewall, set Device group for Panorama.') if not DEVICE_GROUP and not VSYS: return_error('Set vsys for firewall or Device group for Panorama.') # setting security xpath relevant to FW or panorama management if DEVICE_GROUP: device_group_shared = DEVICE_GROUP.lower() if device_group_shared == 'shared': XPATH_SECURITY_RULES = "/config/shared/" DEVICE_GROUP = device_group_shared else: XPATH_SECURITY_RULES = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/" else: XPATH_SECURITY_RULES = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/rulebase/security/rules/entry" # setting objects xpath relevant to FW or panorama management if DEVICE_GROUP: device_group_shared = DEVICE_GROUP.lower() if DEVICE_GROUP == 'shared': XPATH_OBJECTS = "/config/shared/" DEVICE_GROUP = device_group_shared else: XPATH_OBJECTS = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/" else: XPATH_OBJECTS = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/" # Security rule arguments for output handling SECURITY_RULE_ARGS = { 'rulename': 'Name', 'source': 'Source', 'destination': 'Destination', 'negate_source': 'NegateSource', 'negate_destination': 'NegateDestination', 'action': 'Action', 'service': 'Service', 'disable': 'Disabled', 'application': 'Application', 'source_user': 'SourceUser', 'disable_server_response_inspection': 'DisableServerResponseInspection', 'description': 'Description', 'target': 'Target', 'log_forwarding': 'LogForwarding', 'log-setting': 'LogForwarding', 'tag': 'Tags' } PAN_OS_ERROR_DICT = { '1': 'Unknown command - The specific config or operational command is not recognized.', '2': 'Internal errors - Check with technical support when seeing these errors.', '3': 'Internal errors - Check with technical support when seeing these errors.', '4': 'Internal errors - Check with technical support when seeing these errors.', '5': 'Internal errors - Check with technical support when seeing these errors.', '6': 'Bad Xpath -The xpath specified in one or more attributes of the command is invalid.' 'Check the API browser for proper xpath values.', '7': 'Object not present - Object specified by the xpath is not present. For example,' 'entry[@name=value] where no object with name value is present.', '8': 'Object not unique - For commands that operate on a single object, the specified object is not unique.', '10': 'Reference count not zero - Object cannot be deleted as there are other objects that refer to it.' 
'For example, address object still in use in policy.', '11': 'Internal error - Check with technical support when seeing these errors.', '12': 'Invalid object - Xpath or element values provided are not complete.', '14': 'Operation not possible - Operation is allowed but not possible in this case.' 'For example, moving a rule up one position when it is already at the top.', '15': 'Operation denied - Operation is allowed. For example, Admin not allowed to delete own account,' 'Running a command that is not allowed on a passive device.', '16': 'Unauthorized -The API role does not have access rights to run this query.', '17': 'Invalid command -Invalid command or parameters.', '18': 'Malformed command - The XML is malformed.', # 19,20: success '21': 'Internal error - Check with technical support when seeing these errors.', '22': 'Session timed out - The session for this query timed out.' } class PAN_OS_Not_Found(Exception): """ PAN-OS Error. """ def __init__(self, *args): # real signature unknown pass def http_request(uri: str, method: str, headers: Dict = {}, body: Dict = {}, params: Dict = {}, files=None) -> Any: """ Makes an API call with the given arguments """ result = requests.request( method, uri, headers=headers, data=body, verify=USE_SSL, params=params, files=files ) if result.status_code < 200 or result.status_code >= 300: raise Exception('Request Failed. with status: ' + str(result.status_code) + '. Reason is: ' + str(result.reason)) # if pcap download if params.get('type') == 'export': return result json_result = json.loads(xml2json(result.text)) # handle non success if json_result['response']['@status'] != 'success': if 'msg' in json_result['response'] and 'line' in json_result['response']['msg']: # catch non existing object error and display a meaningful message if json_result['response']['msg']['line'] == 'No such node': raise Exception( 'Object was not found, verify that the name is correct and that the instance was committed.') # catch urlfiltering error and display a meaningful message elif str(json_result['response']['msg']['line']).find('test -> url') != -1: raise Exception('The URL filtering license is either expired or not active.' ' Please contact your PAN-OS representative.') # catch non valid jobID errors and display a meaningful message elif isinstance(json_result['response']['msg']['line'], str) and \ json_result['response']['msg']['line'].find('job') != -1 and \ (json_result['response']['msg']['line'].find('not found') != -1 or json_result['response']['msg']['line'].find('No such query job')) != -1: raise Exception('Invalid Job ID error: ' + json_result['response']['msg']['line']) # catch already at the top/bottom error for rules and return this as an entry.note elif str(json_result['response']['msg']['line']).find('already at the') != -1: demisto.results('Rule ' + str(json_result['response']['msg']['line'])) sys.exit(0) # catch already registered ip tags and return this as an entry.note elif str(json_result['response']['msg']['line']).find('already exists, ignore') != -1: if isinstance(json_result['response']['msg']['line']['uid-response']['payload']['register']['entry'], list): ips = [o['@ip'] for o in json_result['response']['msg']['line']['uid-response']['payload']['register']['entry']] else: ips = json_result['response']['msg']['line']['uid-response']['payload']['register']['entry']['@ip'] demisto.results( 'IP ' + str(ips) + ' already exist in the tag. 
All submitted IPs were not registered to the tag.')
            sys.exit(0)
        # catch timed out log queries and return this as an entry.note
        elif str(json_result['response']['msg']['line']).find('Query timed out') != -1:
            demisto.results(str(json_result['response']['msg']['line']) + '. Rerun the query.')
            sys.exit(0)
        if '@code' in json_result['response']:
            raise Exception(
                'Request Failed.\nStatus code: ' + str(json_result['response']['@code']) + '\nWith message: ' + str(
                    json_result['response']['msg']['line']))
        else:
            raise Exception('Request Failed.\n' + str(json_result['response']))

    # handle @code
    if 'response' in json_result and '@code' in json_result['response']:
        if json_result['response']['@code'] in PAN_OS_ERROR_DICT:
            error_message = 'Request Failed.\n' + PAN_OS_ERROR_DICT[json_result['response']['@code']]
            if json_result['response']['@code'] == '7' and DEVICE_GROUP:
                device_group_names = get_device_groups_names()
                if DEVICE_GROUP not in device_group_names:
                    error_message += (f'\nDevice Group: {DEVICE_GROUP} does not exist.'
                                      f' The available Device Groups for this instance:'
                                      f' {", ".join(device_group_names)}.')
                    raise PAN_OS_Not_Found(error_message)
            return_warning('List not found and might be empty', True)
        if json_result['response']['@code'] not in ['19', '20']:
            # error code does not exist in the dict and is not a success code
            if 'msg' in json_result['response']:
                raise Exception(
                    'Request Failed.\nStatus code: ' + str(json_result['response']['@code']) + '\nWith message: ' + str(
                        json_result['response']['msg']))
            else:
                raise Exception('Request Failed.\n' + str(json_result['response']))

    return json_result


def add_argument_list(arg: Any, field_name: str, member: Optional[bool], any_: Optional[bool] = False) -> str:
    member_stringify_list = ''
    if arg:
        if isinstance(arg, str):
            arg = [arg]
        for item in arg:
            member_stringify_list += '<member>' + item + '</member>'
        if field_name == 'member':
            return member_stringify_list
        elif member:
            return '<' + field_name + '>' + member_stringify_list + '</' + field_name + '>'
        else:
            return '<' + field_name + '>' + arg + '</' + field_name + '>'
    if any_:
        if member:
            return '<' + field_name + '><member>any</member></' + field_name + '>'
        else:
            return '<' + field_name + '>any</' + field_name + '>'
    else:
        return ''


def add_argument(arg: Optional[str], field_name: str, member: bool) -> str:
    if arg:
        if member:
            return '<' + field_name + '><member>' + arg + '</member></' + field_name + '>'
        else:
            return '<' + field_name + '>' + arg + '</' + field_name + '>'
    else:
        return ''


def add_argument_open(arg: Optional[str], field_name: str, member: bool) -> str:
    if arg:
        if member:
            return '<' + field_name + '><member>' + arg + '</member></' + field_name + '>'
        else:
            return '<' + field_name + '>' + arg + '</' + field_name + '>'
    else:
        if member:
            return '<' + field_name + '><member>any</member></' + field_name + '>'
        else:
            return '<' + field_name + '>any</' + field_name + '>'


def add_argument_yes_no(arg: Optional[str], field_name: str, option: bool = False) -> str:
    if arg and arg == 'No':
        result = '<' + field_name + '>' + 'no' + '</' + field_name + '>'
    else:
        result = '<' + field_name + '>' + ('yes' if arg else 'no') + '</' + field_name + '>'

    if option:
        result = '<option>' + result + '</option>'
    return result


def add_argument_target(arg: Optional[str], field_name: str) -> str:
    if arg:
        return '<' + field_name + '>' + '<devices>' + '<entry name=\"' + arg + '\"/>' + '</devices>' + '</' + field_name + '>'
    else:
        return ''


def set_xpath_network(template: str = None) -> Tuple[str, Optional[str]]:
    """
    Setting template xpath relevant 
to panorama instances. """ if template: if not DEVICE_GROUP or VSYS: raise Exception('Template is only relevant for Panorama instances.') if not template: template = demisto.params().get('template', None) # setting network xpath relevant to FW or panorama management if DEVICE_GROUP: xpath_network = f'/config/devices/entry[@name=\'localhost.localdomain\']/template/entry[@name=\'{template}\']' \ f'/config/devices/entry[@name=\'localhost.localdomain\']/network' else: xpath_network = "/config/devices/entry[@name='localhost.localdomain']/network" return xpath_network, template def prepare_security_rule_params(api_action: str = None, rulename: str = None, source: Any = None, destination: Any = None, negate_source: str = None, negate_destination: str = None, action: str = None, service: List[str] = None, disable: str = None, application: List[str] = None, source_user: str = None, category: List[str] = None, from_: str = None, to: str = None, description: str = None, target: str = None, log_forwarding: str = None, disable_server_response_inspection: str = None, tags: List[str] = None) -> Dict: if application is None or len(application) == 0: # application always must be specified and the default should be any application = ['any'] rulename = rulename if rulename else ('demisto-' + (str(uuid.uuid4()))[:8]) params = { 'type': 'config', 'action': api_action, 'key': API_KEY, 'element': add_argument_open(action, 'action', False) + add_argument_target(target, 'target') + add_argument_open(description, 'description', False) + add_argument_list(source, 'source', True, True) + add_argument_list(destination, 'destination', True, True) + add_argument_list(application, 'application', True) + add_argument_list(category, 'category', True) + add_argument_open(source_user, 'source-user', True) + add_argument_open(from_, 'from', True) # default from will always be any + add_argument_open(to, 'to', True) # default to will always be any + add_argument_list(service, 'service', True, True) + add_argument_yes_no(negate_source, 'negate-source') + add_argument_yes_no(negate_destination, 'negate-destination') + add_argument_yes_no(disable, 'disabled') + add_argument_yes_no(disable_server_response_inspection, 'disable-server-response-inspection', True) + add_argument(log_forwarding, 'log-setting', False) + add_argument_list(tags, 'tag', True) } if DEVICE_GROUP: if 'pre_post' not in demisto.args(): raise Exception('Please provide the pre_post argument when configuring' ' a security rule in Panorama instance.') else: params['xpath'] = XPATH_SECURITY_RULES + demisto.args()[ 'pre_post'] + '/security/rules/entry' + '[@name=\'' + rulename + '\']' else: params['xpath'] = XPATH_SECURITY_RULES + '[@name=\'' + rulename + '\']' return params def get_pan_os_version() -> str: """Retrieves pan-os version Returns: String representation of the version """ params = { 'type': 'version', 'key': API_KEY } result = http_request(URL, 'GET', params=params) version = result['response']['result']['sw-version'] return version def get_pan_os_major_version() -> int: """Retrieves pan-os major version Returns: String representation of the major version """ major_version = int(get_pan_os_version().split('.')[0]) return major_version ''' FUNCTIONS''' def panorama_test(): """ test module """ params = { 'type': 'op', 'cmd': '<show><system><info></info></system></show>', 'key': API_KEY } http_request( URL, 'GET', params=params ) if DEVICE_GROUP and DEVICE_GROUP != 'shared': device_group_test() _, template = set_xpath_network() if template: 
        template_test(template)

    demisto.results('ok')


def get_device_groups_names():
    """
    Get device group names in the Panorama
    """
    params = {
        'action': 'get',
        'type': 'config',
        'xpath': "/config/devices/entry/device-group/entry",
        'key': API_KEY
    }

    result = http_request(
        URL,
        'GET',
        params=params
    )

    device_groups = result['response']['result']['entry']
    device_group_names = []
    if isinstance(device_groups, dict):
        # only one device group in the panorama
        device_group_names.append(device_groups.get('@name'))
    else:
        for device_group in device_groups:
            device_group_names.append(device_group.get('@name'))

    return device_group_names


def device_group_test():
    """
    Test module for the Device group specified
    """
    device_group_names = get_device_groups_names()
    if DEVICE_GROUP not in device_group_names:
        raise Exception(f'Device Group: {DEVICE_GROUP} does not exist.'
                        f' The available Device Groups for this instance: {", ".join(device_group_names)}.')


def get_templates_names():
    """
    Get templates names in the Panorama
    """
    params = {
        'action': 'get',
        'type': 'config',
        'xpath': "/config/devices/entry[@name=\'localhost.localdomain\']/template/entry",
        'key': API_KEY
    }

    result = http_request(
        URL,
        'GET',
        params=params
    )

    templates = result['response']['result']['entry']
    template_names = []
    if isinstance(templates, dict):
        # only one template in the panorama
        template_names.append(templates.get('@name'))
    else:
        for template in templates:
            template_names.append(template.get('@name'))

    return template_names


def template_test(template):
    """
    Test module for the Template specified
    """
    template_names = get_templates_names()
    if template not in template_names:
        raise Exception(f'Template: {template} does not exist.'
                        f' The available Templates for this instance: {", ".join(template_names)}.')


@logger
def panorama_command():
    """
    Executes a command
    """
    params = {}
    for arg in demisto.args().keys():
        params[arg] = demisto.args()[arg]
    params['key'] = API_KEY

    result = http_request(
        URL,
        'POST',
        body=params
    )

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Command was executed successfully.',
    })


@logger
def panorama_commit():
    params = {
        'type': 'commit',
        'cmd': '<commit></commit>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params
    )

    return result


def panorama_commit_command():
    """
    Commit and show message in warroom
    """
    result = panorama_commit()

    if 'result' in result['response']:
        # commit has been given a jobid
        commit_output = {
            'JobID': result['response']['result']['job'],
            'Status': 'Pending'
        }
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': result,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Commit:', commit_output, ['JobID', 'Status'], removeNull=True),
            'EntryContext': {
                "Panorama.Commit(val.JobID == obj.JobID)": commit_output
            }
        })
    else:
        # no changes to commit
        demisto.results(result['response']['msg'])


@logger
def panorama_commit_status():
    params = {
        'type': 'op',
        'cmd': '<show><jobs><id>' + demisto.args()['job_id'] + '</id></jobs></show>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'GET',
        params=params
    )

    return result


def panorama_commit_status_command():
    """
    Check jobID of commit status
    """
    result = panorama_commit_status()

    if result['response']['result']['job']['type'] != 'Commit':
        raise Exception('JobID given is not of a commit.')

    commit_status_output = {'JobID': result['response']['result']['job']['id']}
    if 
result['response']['result']['job']['status'] == 'FIN': if result['response']['result']['job']['result'] == 'OK': commit_status_output['Status'] = 'Completed' else: # result['response']['job']['result'] == 'FAIL' commit_status_output['Status'] = 'Failed' commit_status_output['Details'] = result['response']['result']['job']['details']['line'] if result['response']['result']['job']['status'] == 'ACT': if result['response']['result']['job']['result'] == 'PEND': commit_status_output['Status'] = 'Pending' # WARNINGS - Job warnings status_warnings = [] if result.get("response", {}).get('result', {}).get('job', {}).get('warnings', {}): status_warnings = result.get("response", {}).get('result', {}).get('job', {}).get('warnings', {}).get('line', []) ignored_error = 'configured with no certificate profile' commit_status_output["Warnings"] = [item for item in status_warnings if item not in ignored_error] demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Commit status:', commit_status_output, ['JobID', 'Status', 'Details', 'Warnings'], removeNull=True), 'EntryContext': {"Panorama.Commit(val.JobID == obj.JobID)": commit_status_output} }) @logger def panorama_push_to_device_group(): params = { 'type': 'commit', 'action': 'all', 'cmd': '<commit-all><shared-policy><device-group><entry name=\"' + DEVICE_GROUP + '\"/></device-group></shared-policy></commit-all>', 'key': API_KEY } result = http_request( URL, 'POST', body=params ) return result def panorama_push_to_device_group_command(): """ Push Panorama configuration and show message in warroom """ if not DEVICE_GROUP: raise Exception("The 'panorama-push-to-device-group' command is relevant for a Palo Alto Panorama instance.") result = panorama_push_to_device_group() if 'result' in result['response']: # commit has been given a jobid push_output = { 'DeviceGroup': DEVICE_GROUP, 'JobID': result['response']['result']['job'], 'Status': 'Pending' } demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Push to Device Group:', push_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': { "Panorama.Push(val.JobID == obj.JobID)": push_output } }) else: # no changes to commit demisto.results(result['response']['msg']['line']) @logger def panorama_push_status(): params = { 'type': 'op', 'cmd': '<show><jobs><id>' + demisto.args()['job_id'] + '</id></jobs></show>', 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def safeget(dct, keys): # Safe get from dictionary for key in keys: try: if isinstance(dct, dict): dct = dct[key] else: return None except KeyError: return None return dct def panorama_push_status_command(): """ Check jobID of push status """ result = panorama_push_status() job = result.get('response', {}).get('result', {}).get('job', {}) if job.get('type', '') != 'CommitAll': raise Exception('JobID given is not of a Push.') push_status_output = {'JobID': job.get('id')} if job.get('status', '') == 'FIN': if job.get('result', '') == 'OK': push_status_output['Status'] = 'Completed' else: push_status_output['Status'] = 'Failed' devices = job.get('devices') devices = devices.get('entry') if devices else devices if isinstance(devices, list): devices_details = [device.get('status') for device in devices if device] push_status_output['Details'] = devices_details elif 
isinstance(devices, dict): push_status_output['Details'] = devices.get('status') if job.get('status') == 'PEND': push_status_output['Status'] = 'Pending' # WARNINGS - Job warnings status_warnings = [] # type: ignore devices = safeget(result, ["response", "result", "job", "devices", "entry"]) if devices: for device in devices: device_warnings = safeget(device, ["details", "msg", "warnings", "line"]) status_warnings.extend([] if not device_warnings else device_warnings) push_status_output["Warnings"] = status_warnings demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Push to Device Group status:', push_status_output, ['JobID', 'Status', 'Details', 'Warnings'], removeNull=True), 'EntryContext': {"Panorama.Push(val.JobID == obj.JobID)": push_status_output} }) ''' Addresses Commands ''' def prettify_addresses_arr(addresses_arr: list) -> List: if not isinstance(addresses_arr, list): return prettify_address(addresses_arr) pretty_addresses_arr = [] for address in addresses_arr: pretty_address = {'Name': address['@name']} if DEVICE_GROUP: pretty_address['DeviceGroup'] = DEVICE_GROUP if 'description' in address: pretty_address['Description'] = address['description'] if 'ip-netmask' in address: pretty_address['IP_Netmask'] = address['ip-netmask'] if 'ip-range' in address: pretty_address['IP_Range'] = address['ip-range'] if 'fqdn' in address: pretty_address['FQDN'] = address['fqdn'] if 'tag' in address and 'member' in address['tag']: pretty_address['Tags'] = address['tag']['member'] pretty_addresses_arr.append(pretty_address) return pretty_addresses_arr @logger def panorama_list_addresses(tag=None): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "address/entry", 'key': API_KEY } if tag: params['xpath'] += f'[( tag/member = \'{tag}\')]' result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_list_addresses_command(): """ Get all addresses """ tag = demisto.args().get('tag') addresses_arr = panorama_list_addresses(tag) addresses_output = prettify_addresses_arr(addresses_arr) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': addresses_arr, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Addresses:', addresses_output, ['Name', 'IP_Netmask', 'IP_Range', 'FQDN', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.Addresses(val.Name == obj.Name)": addresses_output } }) def prettify_address(address: Dict) -> Dict: pretty_address = {'Name': address['@name']} if DEVICE_GROUP: pretty_address['DeviceGroup'] = DEVICE_GROUP if 'description' in address: pretty_address['Description'] = address['description'] if 'ip-netmask' in address: pretty_address['IP_Netmask'] = address['ip-netmask'] if 'ip-range' in address: pretty_address['IP_Range'] = address['ip-range'] if 'fqdn' in address: pretty_address['FQDN'] = address['fqdn'] if 'tag' in address and 'member' in address['tag']: pretty_address['Tags'] = address['tag']['member'] return pretty_address @logger def panorama_get_address(address_name: str) -> Dict: params = { 'action': 'show', 'type': 'config', 'xpath': XPATH_OBJECTS + "address/entry[@name='" + address_name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_address_command(): """ Get an address """ address_name = demisto.args()['name'] 
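
# A minimal usage sketch for the safeget helper defined above, assuming a response
# shaped like the push-status payload this file reads; the sample dict below is
# hypothetical, not captured API output. safeget walks one key at a time and returns
# None instead of raising KeyError when any level of the path is missing.
def _safeget_usage_sketch():
    sample_result = {
        'response': {'result': {'job': {'devices': {'entry': [
            {'details': {'msg': {'warnings': {'line': ['sample warning']}}}}
        ]}}}}
    }
    # Present path: returns the list of device entries.
    devices = safeget(sample_result, ["response", "result", "job", "devices", "entry"])
    assert isinstance(devices, list) and len(devices) == 1
    # Missing path: returns None rather than raising KeyError.
    assert safeget(sample_result, ["response", "result", "job", "no-such-key"]) is None
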
address = panorama_get_address(address_name) address_output = prettify_address(address) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': address, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Address:', address_output, ['Name', 'IP_Netmask', 'IP_Range', 'FQDN', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.Addresses(val.Name == obj.Name)": address_output } }) @logger def panorama_create_address(address_name: str, fqdn: str = None, ip_netmask: str = None, ip_range: str = None, description: str = None, tags: list = None): params = {'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "address/entry[@name='" + address_name + "']", 'key': API_KEY, 'element': (add_argument(fqdn, 'fqdn', False) + add_argument(ip_netmask, 'ip-netmask', False) + add_argument(ip_range, 'ip-range', False) + add_argument(description, 'description', False) + add_argument_list(tags, 'tag', True)) } http_request( URL, 'POST', body=params, ) def panorama_create_address_command(): """ Create an address object """ address_name = demisto.args()['name'] description = demisto.args().get('description') tags = argToList(demisto.args()['tag']) if 'tag' in demisto.args() else None fqdn = demisto.args().get('fqdn') ip_netmask = demisto.args().get('ip_netmask') ip_range = demisto.args().get('ip_range') if not fqdn and not ip_netmask and not ip_range: raise Exception('Please specify exactly one of the following: fqdn, ip_netmask, ip_range.') if (fqdn and ip_netmask) or (fqdn and ip_range) or (ip_netmask and ip_range): raise Exception('Please specify exactly one of the following: fqdn, ip_netmask, ip_range.') address = panorama_create_address(address_name, fqdn, ip_netmask, ip_range, description, tags) address_output = {'Name': address_name} if DEVICE_GROUP: address_output['DeviceGroup'] = DEVICE_GROUP if fqdn: address_output['FQDN'] = fqdn if ip_netmask: address_output['IP_Netmask'] = ip_netmask if ip_range: address_output['IP_Range'] = ip_range if description: address_output['Description'] = description if tags: address_output['Tags'] = tags demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': address, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Address was created successfully.', 'EntryContext': { "Panorama.Addresses(val.Name == obj.Name)": address_output } }) @logger def panorama_delete_address(address_name: str): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "address/entry[@name='" + address_name + "']", 'element': "<entry name='" + address_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_address_command(): """ Delete an address """ address_name = demisto.args()['name'] address = panorama_delete_address(address_name) address_output = {'Name': address_name} if DEVICE_GROUP: address_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': address, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Address was deleted successfully.', 'EntryContext': { "Panorama.Addresses(val.Name == obj.Name)": address_output } }) ''' Address Group Commands ''' def prettify_address_groups_arr(address_groups_arr: list) -> List: if not isinstance(address_groups_arr, list): return prettify_address_group(address_groups_arr) pretty_address_groups_arr = [] for address_group in address_groups_arr: pretty_address_group = { 
'Name': address_group['@name'], 'Type': 'static' if 'static' in address_group else 'dynamic' } if DEVICE_GROUP: pretty_address_group['DeviceGroup'] = DEVICE_GROUP if 'description' in address_group: pretty_address_group['Description'] = address_group['description'] if 'tag' in address_group and 'member' in address_group['tag']: pretty_address_group['Tags'] = address_group['tag']['member'] if pretty_address_group['Type'] == 'static': # static address groups can have empty lists if address_group['static']: pretty_address_group['Addresses'] = address_group['static']['member'] else: pretty_address_group['Match'] = address_group['dynamic']['filter'] pretty_address_groups_arr.append(pretty_address_group) return pretty_address_groups_arr @logger def panorama_list_address_groups(tag: str = None): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "address-group/entry", 'key': API_KEY } if tag: params['xpath'] += f'[( tag/member = \'{tag}\')]' result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_list_address_groups_command(): """ Get all address groups """ tag = demisto.args().get('tag') address_groups_arr = panorama_list_address_groups(tag) address_groups_output = prettify_address_groups_arr(address_groups_arr) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': address_groups_arr, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Address groups:', address_groups_output, ['Name', 'Type', 'Addresses', 'Match', 'Description', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.AddressGroups(val.Name == obj.Name)": address_groups_output } }) def prettify_address_group(address_group: Dict) -> Dict: pretty_address_group = { 'Name': address_group['@name'], 'Type': 'static' if 'static' in address_group else 'dynamic' } if DEVICE_GROUP: pretty_address_group['DeviceGroup'] = DEVICE_GROUP if 'description' in address_group: pretty_address_group['Description'] = address_group['description'] if 'tag' in address_group and 'member' in address_group['tag']: pretty_address_group['Tags'] = address_group['tag']['member'] if pretty_address_group['Type'] == 'static': pretty_address_group['Addresses'] = address_group['static']['member'] else: pretty_address_group['Match'] = address_group['dynamic']['filter'] return pretty_address_group @logger def panorama_get_address_group(address_group_name: str): params = { 'action': 'show', 'type': 'config', 'xpath': XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_address_group_command(): """ Get an address group """ address_group_name = demisto.args()['name'] result = panorama_get_address_group(address_group_name) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Address group:', prettify_address_group(result), ['Name', 'Type', 'Addresses', 'Match', 'Description', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.AddressGroups(val.Name == obj.Name)": prettify_address_group(result) } }) @logger def panorama_create_static_address_group(address_group_name: str, addresses: list, description: str = None, tags: list = None): params = {'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + 
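        # Note (assumption, based on how the xpaths here are built): XPATH_OBJECTS is
        # expected to already be scoped to the configured device group, or to the
        # shared/vsys location on a firewall, so object xpaths only append the
        # object-relative path such as "address-group/entry[@name='...']".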
"']", 'key': API_KEY, 'element': ( "<static>" + add_argument_list(addresses, 'member', True) + "</static>" + add_argument(description, 'description', False) + add_argument_list(tags, 'tag', True) )} result = http_request( URL, 'POST', body=params, ) return result def panorama_create_dynamic_address_group(address_group_name: str, match: str, description: str = None, tags: list = None): params = { 'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']", 'element': "<dynamic>" + add_argument(match, 'filter', False) + "</dynamic>" + add_argument(description, 'description', False) + add_argument_list(tags, 'tag', True), 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_create_address_group_command(): """ Create an address group """ address_group_name = demisto.args()['name'] type_ = demisto.args()['type'] description = demisto.args().get('description') tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None match = demisto.args().get('match') addresses = argToList(demisto.args()['addresses']) if 'addresses' in demisto.args() else None if match and addresses: raise Exception('Please specify only one of the following: addresses, match.') if type_ == 'static': if not addresses: raise Exception('Please specify addresses in order to create a static address group.') if type_ == 'dynamic': if not match: raise Exception('Please specify a match in order to create a dynamic address group.') if type_ == 'static': result = panorama_create_static_address_group(address_group_name, addresses, description, tags) else: result = panorama_create_dynamic_address_group(address_group_name, match, description, tags) address_group_output = { 'Name': address_group_name, 'Type': type_ } if DEVICE_GROUP: address_group_output['DeviceGroup'] = DEVICE_GROUP if match: address_group_output['Match'] = match if addresses: address_group_output['Addresses'] = addresses if description: address_group_output['Description'] = description if tags: address_group_output['Tags'] = tags demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Address group was created successfully.', 'EntryContext': { "Panorama.AddressGroups(val.Name == obj.Name)": address_group_output } }) @logger def panorama_delete_address_group(address_group_name: str): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']", 'element': "<entry name='" + address_group_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_address_group_command(): """ Delete an address group """ address_group_name = demisto.args()['name'] address_group = panorama_delete_address_group(address_group_name) address_group_output = {'Name': address_group_name} if DEVICE_GROUP: address_group_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': address_group, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Address group was deleted successfully.', 'EntryContext': { "Panorama.AddressGroups(val.Name == obj.Name)": address_group_output } }) def panorama_edit_address_group_command(): """ Edit an address group """ address_group_name = demisto.args()['name'] type_ = demisto.args()['type'] match = demisto.args().get('match') element_to_add 
= argToList(demisto.args()['element_to_add']) if 'element_to_add' in demisto.args() else None element_to_remove = argToList( demisto.args()['element_to_remove']) if 'element_to_remove' in demisto.args() else None if type_ == 'dynamic': if not match: raise Exception('To edit a Dynamic Address group, Please provide a match.') match_param = add_argument_open(match, 'filter', False) match_path = XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']/dynamic/filter" if type_ == 'static': if (element_to_add and element_to_remove) or (not element_to_add and not element_to_remove): raise Exception('To edit a Static Address group,' 'Please specify exactly one of the following: element_to_add, element_to_remove.') address_group_prev = panorama_get_address_group(address_group_name) address_group_list: List[str] = [] if 'static' in address_group_prev: if address_group_prev['static']: address_group_list = argToList(address_group_prev['static']['member']) if element_to_add: addresses = list(set(element_to_add + address_group_list)) else: addresses = [item for item in address_group_list if item not in element_to_remove] addresses_param = add_argument_list(addresses, 'member', False) addresses_path = XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']/static" description = demisto.args().get('description') tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None params = { 'action': 'edit', 'type': 'config', 'key': API_KEY, 'xpath': '', 'element': '' } address_group_output = {'Name': address_group_name} if DEVICE_GROUP: address_group_output['DeviceGroup'] = DEVICE_GROUP if type_ == 'dynamic' and match: params['xpath'] = match_path params['element'] = match_param result = http_request( URL, 'POST', body=params ) address_group_output['Match'] = match if type_ == 'static' and addresses: params['xpath'] = addresses_path params['element'] = "<static>" + addresses_param + "</static>" result = http_request( URL, 'POST', body=params ) address_group_output['Addresses'] = addresses if description: description_param = add_argument_open(description, 'description', False) description_path = XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']/description" params['xpath'] = description_path params['element'] = description_param result = http_request( URL, 'POST', body=params ) address_group_output['Description'] = description if tags: tag_param = add_argument_list(tags, 'tag', True) tag_path = XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']/tag" params['xpath'] = tag_path params['element'] = tag_param result = http_request( URL, 'POST', body=params ) address_group_output['Tags'] = tags demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Address Group was edited successfully.', 'EntryContext': { "Panorama.AddressGroups(val.Name == obj.Name)": address_group_output } }) ''' Services Commands ''' def prettify_services_arr(services_arr: list): if not isinstance(services_arr, list): return prettify_service(services_arr) pretty_services_arr = [] for service in services_arr: pretty_service = {'Name': service['@name']} if DEVICE_GROUP: pretty_service['DeviceGroup'] = DEVICE_GROUP if 'description' in service: pretty_service['Description'] = service['description'] if 'tag' in service and 'member' in service['tag']: pretty_service['Tags'] = service['tag']['member'] protocol = '' if 'protocol' in service: if 'tcp' in 
service['protocol']: protocol = 'tcp' elif 'udp' in service['protocol']: protocol = 'udp' else: protocol = 'sctp' pretty_service['Protocol'] = protocol if 'port' in service['protocol'][protocol]: pretty_service['DestinationPort'] = service['protocol'][protocol]['port'] if 'source-port' in service['protocol'][protocol]: pretty_service['SourcePort'] = service['protocol'][protocol]['source-port'] pretty_services_arr.append(pretty_service) return pretty_services_arr @logger def panorama_list_services(tag: str = None): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "service/entry", 'key': API_KEY } if tag: params['xpath'] += f'[( tag/member = \'{tag}\')]' result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_list_services_command(): """ Get all Services """ tag = demisto.args().get('tag') services_arr = panorama_list_services(tag) services_output = prettify_services_arr(services_arr) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': services_arr, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Services:', services_output, ['Name', 'Protocol', 'SourcePort', 'DestinationPort', 'Description', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.Services(val.Name == obj.Name)": services_output } }) def prettify_service(service: Dict): pretty_service = { 'Name': service['@name'], } if DEVICE_GROUP: pretty_service['DeviceGroup'] = DEVICE_GROUP if 'description' in service: pretty_service['Description'] = service['description'] if 'tag' in service and 'member' in service['tag']: pretty_service['Tags'] = service['tag']['member'] protocol = '' if 'protocol' in service: if 'tcp' in service['protocol']: protocol = 'tcp' elif 'udp' in service['protocol']: protocol = 'udp' else: protocol = 'sctp' pretty_service['Protocol'] = protocol if 'port' in service['protocol'][protocol]: pretty_service['DestinationPort'] = service['protocol'][protocol]['port'] if 'source-port' in service['protocol'][protocol]: pretty_service['SourcePort'] = service['protocol'][protocol]['source-port'] return pretty_service @logger def panorama_get_service(service_name: str): params = { 'action': 'show', 'type': 'config', 'xpath': XPATH_OBJECTS + "service/entry[@name='" + service_name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_service_command(): """ Get a service """ service_name = demisto.args()['name'] service = panorama_get_service(service_name) service_output = prettify_service(service) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': service, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Address:', service_output, ['Name', 'Protocol', 'SourcePort', 'DestinationPort', 'Description', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.Services(val.Name == obj.Name)": service_output } }) @logger def panorama_create_service(service_name: str, protocol: str, destination_port: str, source_port: str = None, description: str = None, tags: list = None): params = { 'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "service/entry[@name='" + service_name + "']", 'key': API_KEY, 'element': '<protocol>' + '<' + protocol + '>' + add_argument(destination_port, 'port', False) + add_argument(source_port, 'source-port', False) + '</' + protocol + '>' + '</protocol>' + add_argument(description, 'description', 
False) + add_argument_list(tags, 'tag', True) } result = http_request( URL, 'POST', body=params, ) return result def panorama_create_service_command(): """ Create a service object """ service_name = demisto.args()['name'] protocol = demisto.args()['protocol'] destination_port = demisto.args()['destination_port'] source_port = demisto.args().get('source_port') description = demisto.args().get('description') tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None service = panorama_create_service(service_name, protocol, destination_port, source_port, description, tags) service_output = { 'Name': service_name, 'Protocol': protocol, 'DestinationPort': destination_port } if DEVICE_GROUP: service_output['DeviceGroup'] = DEVICE_GROUP if source_port: service_output['SourcePort'] = source_port if description: service_output['Description'] = description if tags: service_output['Tags'] = tags demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': service, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Service was created successfully.', 'EntryContext': { "Panorama.Services(val.Name == obj.Name)": service_output } }) @logger def panorama_delete_service(service_name: str): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "service/entry[@name='" + service_name + "']", 'element': "<entry name='" + service_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_service_command(): """ Delete a service """ service_name = demisto.args()['name'] service = panorama_delete_service(service_name) service_output = {'Name': service_name} if DEVICE_GROUP: service_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': service, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Service was deleted successfully.', 'EntryContext': { "Panorama.Services(val.Name == obj.Name)": service_output } }) ''' Service Group Commands ''' def prettify_service_groups_arr(service_groups_arr: list): if not isinstance(service_groups_arr, list): return prettify_service_group(service_groups_arr) pretty_service_groups_arr = [] for service_group in service_groups_arr: pretty_service_group = { 'Name': service_group['@name'], 'Services': service_group['members']['member'] } if DEVICE_GROUP: pretty_service_group['DeviceGroup'] = DEVICE_GROUP if 'tag' in service_group and 'member' in service_group['tag']: pretty_service_group['Tags'] = service_group['tag']['member'] pretty_service_groups_arr.append(pretty_service_group) return pretty_service_groups_arr @logger def panorama_list_service_groups(tag: str = None): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "service-group/entry", 'key': API_KEY } if tag: params['xpath'] += f'[( tag/member = \'{tag}\')]' result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_list_service_groups_command(): """ Get all address groups """ tag = demisto.args().get('tag') service_groups_arr = panorama_list_service_groups(tag) service_groups_output = prettify_service_groups_arr(service_groups_arr) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': service_groups_arr, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Service groups:', service_groups_output, ['Name', 'Services', 'Tags'], removeNull=True), 'EntryContext': { 
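            # The context key below follows the DT filter convention
            # "Panorama.X(val.Name == obj.Name)": an output whose Name matches an
            # existing context entry is merged into it instead of being duplicated.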
"Panorama.ServiceGroups(val.Name == obj.Name)": service_groups_output } }) def prettify_service_group(service_group: dict): pretty_service_group = { 'Name': service_group['@name'], 'Services': service_group['members']['member'] } if DEVICE_GROUP: pretty_service_group['DeviceGroup'] = DEVICE_GROUP if 'tag' in service_group and 'member' in service_group['tag']: pretty_service_group['Tags'] = service_group['tag']['member'] return pretty_service_group @logger def panorama_get_service_group(service_group_name): params = { 'action': 'show', 'type': 'config', 'xpath': XPATH_OBJECTS + "service-group/entry[@name='" + service_group_name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_service_group_command(): """ Get an address group """ service_group_name = demisto.args()['name'] result = panorama_get_service_group(service_group_name) pretty_service_group = prettify_service_group(result) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Service group:', pretty_service_group, ['Name', 'Services', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.ServiceGroups(val.Name == obj.Name)": pretty_service_group } }) def panorama_create_service_group(service_group_name, services, tags): params = { 'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "service-group/entry[@name='" + service_group_name + "']", 'element': '<members>' + add_argument_list(services, 'member', True) + '</members>' + add_argument_list(tags, 'tag', True), 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_create_service_group_command(): """ Create a service group """ service_group_name = demisto.args()['name'] services = argToList(demisto.args()['services']) tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None result = panorama_create_service_group(service_group_name, services, tags) service_group_output = { 'Name': service_group_name, 'Services': services } if DEVICE_GROUP: service_group_output['DeviceGroup'] = DEVICE_GROUP if tags: service_group_output['Tags'] = tags demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Service group was created successfully.', 'EntryContext': { "Panorama.ServiceGroups(val.Name == obj.Name)": service_group_output } }) @logger def panorama_delete_service_group(service_group_name): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "service-group/entry[@name='" + service_group_name + "']", 'element': "<entry name='" + service_group_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_service_group_command(): """ Delete a service group """ service_group_name = demisto.args()['name'] service_group = panorama_delete_service_group(service_group_name) service_group_output = {'Name': service_group_name} if DEVICE_GROUP: service_group_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': service_group, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Service group was deleted successfully.', 'EntryContext': { "Panorama.ServiceGroups(val.Name == obj.Name)": service_group_output } }) @logger def 
panorama_edit_service_group(service_group_name, services, tag): params = { 'action': 'edit', 'type': 'config', 'xpath': '', 'element': '', 'key': API_KEY, } if services: services_xpath = XPATH_OBJECTS + "service-group/entry[@name='" + service_group_name + "']/members" services_element = '<members>' + add_argument_list(services, 'member', False) + '</members>' params['xpath'] = services_xpath params['element'] = services_element result = http_request( URL, 'POST', body=params ) if tag: tag_xpath = XPATH_OBJECTS + "service-group/entry[@name='" + service_group_name + "']/tag" tag_element = add_argument_list(tag, 'tag', True) params['xpath'] = tag_xpath params['element'] = tag_element result = http_request( URL, 'POST', body=params ) return result def panorama_edit_service_group_command(): """ Edit a service group """ service_group_name = demisto.args()['name'] services_to_add = argToList(demisto.args()['services_to_add']) if 'services_to_add' in demisto.args() else None services_to_remove = argToList( demisto.args()['services_to_remove']) if 'services_to_remove' in demisto.args() else None tag = argToList(demisto.args()['tag']) if 'tag' in demisto.args() else None if not services_to_add and not services_to_remove and not tag: raise Exception('Specify at least one of the following arguments: services_to_add, services_to_remove, tag') if services_to_add and services_to_remove: raise Exception('Specify at most one of the following arguments: services_to_add, services_to_remove') services: List[str] = [] if services_to_add or services_to_remove: service_group_prev = panorama_get_service_group(service_group_name) service_group_list = argToList(service_group_prev['members']['member']) if services_to_add: services = list(set(services_to_add + service_group_list)) else: services = [item for item in service_group_list if item not in services_to_remove] if len(services) == 0: raise Exception('A Service group must have at least one service.') result = panorama_edit_service_group(service_group_name, services, tag) service_group_output = {'Name': service_group_name} if DEVICE_GROUP: service_group_output['DeviceGroup'] = DEVICE_GROUP if len(services) > 0: service_group_output['Services'] = services if tag: service_group_output['Tag'] = tag demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Service group was edited successfully.', 'EntryContext': { "Panorama.ServiceGroups(val.Name == obj.Name)": service_group_output } }) ''' Custom URL Category Commands ''' def prettify_custom_url_category(custom_url_category): pretty_custom_url_category = { 'Name': custom_url_category['@name'], } if DEVICE_GROUP: pretty_custom_url_category['DeviceGroup'] = DEVICE_GROUP if 'description' in custom_url_category: pretty_custom_url_category['Description'] = custom_url_category['description'] # In PAN-OS 9.X changes to the default behavior were introduced regarding custom url categories. 
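    # From PAN-OS 9.x an entry carries a 'type' field that distinguishes the two
    # flavors; illustrative shapes only, not captured output:
    #   {'@name': 'my-list',  'type': 'URL List',       'list': {'member': ['example.com']}}
    #   {'@name': 'my-match', 'type': 'Category Match', 'list': {'member': ['gambling']}}
    # Pre-9.x entries have no 'type' field, and the 'list' members are always sites.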
if 'type' in custom_url_category: pretty_custom_url_category['Type'] = custom_url_category['type'] if pretty_custom_url_category['Type'] == 'Category Match': pretty_custom_url_category['Categories'] = custom_url_category['list']['member'] else: pretty_custom_url_category['Sites'] = custom_url_category['list']['member'] else: pretty_custom_url_category['Sites'] = custom_url_category['list']['member'] return pretty_custom_url_category @logger def panorama_get_custom_url_category(name): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/custom-url-category/entry[@name='" + name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_custom_url_category_command(): """ Get a custom url category """ name = demisto.args()['name'] custom_url_category = panorama_get_custom_url_category(name) custom_url_category_output = prettify_custom_url_category(custom_url_category) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': custom_url_category, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Custom URL Category:', custom_url_category_output, ['Name', 'Type', 'Categories', 'Sites', 'Description'], removeNull=True), 'EntryContext': { "Panorama.CustomURLCategory(val.Name == obj.Name)": custom_url_category_output } }) @logger def panorama_create_custom_url_category(custom_url_category_name: str, type_: Any = None, sites=None, categories=None, description: str = None): # In PAN-OS 9.X changes to the default behavior were introduced regarding custom url categories. major_version = get_pan_os_major_version() element = add_argument(description, 'description', False) if major_version <= 8: if type_ or categories: raise Exception('The type and categories arguments are only relevant for PAN-OS 9.x versions.') element += add_argument_list(sites, 'list', True) else: # major is 9.x if not type_: raise Exception('The type argument is mandatory for PAN-OS 9.x versions.') if (not sites and not categories) or (sites and categories): raise Exception('Exactly one of the sites and categories arguments should be defined.') if (type_ == 'URL List' and categories) or (type_ == 'Category Match' and sites): raise Exception('URL List type is only for sites, Category Match is only for categories.') if type_ == 'URL List': element += add_argument_list(sites, 'list', True) else: element += add_argument_list(categories, 'list', True) element += add_argument(type_, 'type', False) params = { 'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/custom-url-category/entry[@name='" + custom_url_category_name + "']", 'element': element, 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) custom_url_category_output = {'Name': custom_url_category_name} if DEVICE_GROUP: custom_url_category_output['DeviceGroup'] = DEVICE_GROUP if description: custom_url_category_output['Description'] = description if type_: custom_url_category_output['Type'] = type_ if sites: custom_url_category_output['Sites'] = sites else: custom_url_category_output['Categories'] = categories return result, custom_url_category_output def panorama_create_custom_url_category_command(): """ Create a custom URL category """ custom_url_category_name = demisto.args()['name'] type_ = demisto.args()['type'] if 'type' in demisto.args() else None sites = argToList(demisto.args()['sites']) if 'sites' in demisto.args() else None categories = 
argToList(demisto.args()['categories']) if 'categories' in demisto.args() else None description = demisto.args().get('description') custom_url_category, custom_url_category_output = panorama_create_custom_url_category(custom_url_category_name, type_, sites, categories, description) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': custom_url_category, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Created Custom URL Category:', custom_url_category_output, ['Name', 'Type', 'Categories', 'Sites', 'Description'], removeNull=True), 'EntryContext': { "Panorama.CustomURLCategory(val.Name == obj.Name)": custom_url_category_output } }) @logger def panorama_delete_custom_url_category(custom_url_category_name): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/custom-url-category/entry[@name='" + custom_url_category_name + "']", 'element': "<entry name='" + custom_url_category_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_custom_url_category_command(): """ Delete a custom url category """ custom_url_category_name = demisto.args()['name'] result = panorama_delete_custom_url_category(custom_url_category_name) custom_url_category_output = {'Name': custom_url_category_name} if DEVICE_GROUP: custom_url_category_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Custom URL category was deleted successfully.', 'EntryContext': { "Panorama.CustomURLCategory(val.Name == obj.Name)": custom_url_category_output } }) @logger def panorama_edit_custom_url_category(custom_url_category_name, type_, items, description=None): major_version = get_pan_os_major_version() description_element = add_argument(description, 'description', False) items_element = add_argument_list(items, 'list', True) if major_version <= 8: if type_ == 'Category Match': raise Exception('The Categories argument is only relevant for PAN-OS 9.x versions.') element = f"<entry name='{custom_url_category_name}'>{description_element}{items_element}</entry>" else: type_element = add_argument(type_, 'type', False) element = f"<entry name='{custom_url_category_name}'>{description_element}{items_element}{type_element}</entry>" params = { 'action': 'edit', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/custom-url-category/entry[@name='" + custom_url_category_name + "']", 'element': element, 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) custom_url_category_output = {'Name': custom_url_category_name, 'Type': type_} if DEVICE_GROUP: custom_url_category_output['DeviceGroup'] = DEVICE_GROUP if description: custom_url_category_output['Description'] = description if type_ == 'Category Match': custom_url_category_output['Categories'] = items else: custom_url_category_output['Sites'] = items return result, custom_url_category_output def panorama_custom_url_category_add_items(custom_url_category_name, items, type_): """ Add sites or categories to a configured custom url category """ custom_url_category = panorama_get_custom_url_category(custom_url_category_name) if '@dirtyId' in custom_url_category: raise Exception('Please commit the instance prior to editing the Custom URL Category.') description = custom_url_category.get('description') custom_url_category_items: List[str] = [] if 'list' in 
custom_url_category:
        if custom_url_category['list']:
            custom_url_category_items = argToList(custom_url_category['list']['member'])

    merged_items = list((set(items)).union(set(custom_url_category_items)))

    result, custom_url_category_output = panorama_edit_custom_url_category(custom_url_category_name, type_,
                                                                           merged_items, description)
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Updated Custom URL Category:', custom_url_category_output,
                                         ['Name', 'Type', 'Categories', 'Sites', 'Description'], removeNull=True),
        'EntryContext': {
            "Panorama.CustomURLCategory(val.Name == obj.Name)": custom_url_category_output
        }
    })


def panorama_custom_url_category_remove_items(custom_url_category_name, items, type_):
    """
    Remove sites or categories from a configured custom url category
    """
    custom_url_category = panorama_get_custom_url_category(custom_url_category_name)
    if '@dirtyId' in custom_url_category:
        raise Exception('Please commit the instance prior to editing the Custom URL Category.')
    description = custom_url_category.get('description')

    custom_url_category_items = None  # initialized so the emptiness check below cannot hit an unbound name
    if 'list' in custom_url_category:
        if 'member' in custom_url_category['list']:
            custom_url_category_items = custom_url_category['list']['member']
    if not custom_url_category_items:
        raise Exception('Custom url category does not contain sites or categories.')

    subtracted_items = [item for item in custom_url_category_items if item not in items]
    result, custom_url_category_output = panorama_edit_custom_url_category(custom_url_category_name, type_,
                                                                           subtracted_items, description)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Updated Custom URL Category:', custom_url_category_output,
                                         ['Name', 'Categories', 'Sites', 'Description'], removeNull=True),
        'EntryContext': {
            "Panorama.CustomURLCategory(val.Name == obj.Name)": custom_url_category_output
        }
    })


def panorama_edit_custom_url_category_command():
    custom_url_category_name = demisto.args()['name']
    items = argToList(demisto.args()['sites']) if 'sites' in demisto.args() else argToList(demisto.args()['categories'])
    type_ = "URL List" if 'sites' in demisto.args() else "Category Match"
    if demisto.args()['action'] == 'remove':
        panorama_custom_url_category_remove_items(custom_url_category_name, items, type_)
    else:
        panorama_custom_url_category_add_items(custom_url_category_name, items, type_)


''' URL Filtering '''


@logger
def panorama_get_url_category(url_cmd, url):
    params = {
        'action': 'show',
        'type': 'op',
        'key': API_KEY,
        'cmd': f'<test><{url_cmd}>{url}</{url_cmd}></test>'
    }
    raw_result = http_request(
        URL,
        'POST',
        body=params,
    )
    result = raw_result['response']['result']
    if url_cmd == 'url-info-host':
        category = result.split(': ')[1]
    else:
        result = result.splitlines()[1]
        if url_cmd == 'url':
            category = result.split(' ')[1]
        else:  # url-info-cloud
            category = result.split(',')[3]
    return category


def populate_url_filter_category_from_context(category):
    url_filter_category = demisto.dt(demisto.context(), f'Panorama.URLFilter(val.Category === "{category}")')
    if not url_filter_category:
        return []

    if type(url_filter_category) is list:
        return url_filter_category[0].get("URL")
    else:  # url_filter_category is a dict
        context_urls = url_filter_category.get("URL", None)  # pylint: disable=no-member
        if type(context_urls) is str:
            return [context_urls]
        else:
            return context_urls


def
panorama_get_url_category_command(url_cmd: str): """ Get the url category from Palo Alto URL Filtering """ urls = argToList(demisto.args()['url']) categories_dict: Dict[str, list] = {} categories_dict_hr: Dict[str, list] = {} for url in urls: category = panorama_get_url_category(url_cmd, url) if category in categories_dict: categories_dict[category].append(url) categories_dict_hr[category].append(url) else: categories_dict[category] = [url] categories_dict_hr[category] = [url] context_urls = populate_url_filter_category_from_context(category) categories_dict[category] = list((set(categories_dict[category])).union(set(context_urls))) url_category_output_hr = [] for key, value in categories_dict_hr.items(): url_category_output_hr.append({ 'Category': key, 'URL': value }) url_category_output = [] for key, value in categories_dict.items(): url_category_output.append({ 'Category': key, 'URL': value }) title = 'URL Filtering' if url_cmd == 'url-info-cloud': title += ' from cloud' elif url_cmd == 'url-info-host': title += ' from host' human_readable = tableToMarkdown(f'{title}:', url_category_output_hr, ['URL', 'Category'], removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': categories_dict, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': { "Panorama.URLFilter(val.Category === obj.Category)": url_category_output } }) ''' URL Filter ''' def prettify_get_url_filter(url_filter): pretty_url_filter = {'Name': url_filter['@name']} if DEVICE_GROUP: pretty_url_filter['DeviceGroup'] = DEVICE_GROUP if 'description' in url_filter: pretty_url_filter['Description'] = url_filter['description'] pretty_url_filter['Category'] = [] url_category_list: List[str] = [] action: str if 'alert' in url_filter: url_category_list = url_filter['alert']['member'] action = 'alert' elif 'allow' in url_filter: url_category_list = url_filter['allow']['member'] action = 'allow' elif 'block' in url_filter: url_category_list = url_filter['block']['member'] action = 'block' elif 'continue' in url_filter: url_category_list = url_filter['continue']['member'] action = 'continue' elif 'override' in url_filter: url_category_list = url_filter['override']['member'] action = 'override' for category in url_category_list: pretty_url_filter['Category'].append({ 'Name': category, 'Action': action }) if 'allow-list' in url_filter or 'block-list' in url_filter: pretty_url_filter['Overrides'] = [] if 'allow-list' in url_filter: pretty_url_filter['OverrideAllowList'] = url_filter['allow-list']['member'] else: pretty_url_filter['OverrideBlockList'] = url_filter['block-list']['member'] return pretty_url_filter @logger def panorama_get_url_filter(name): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_url_filter_command(): """ Get a URL Filter """ name = demisto.args()['name'] url_filter = panorama_get_url_filter(name) url_filter_output = prettify_get_url_filter(url_filter) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': url_filter, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('URL Filter:', url_filter_output, ['Name', 'Category', 'OverrideAllowList', 'OverrideBlockList', 'Description'], removeNull=True), 'EntryContext': { "Panorama.URLFilter(val.Name == obj.Name)": 
url_filter_output } }) @logger def panorama_create_url_filter( url_filter_name, action, url_category_list, override_allow_list=None, override_block_list=None, description=None): element = add_argument_list(url_category_list, action, True) + add_argument_list(override_allow_list, 'allow-list', True) + add_argument_list( override_block_list, 'block-list', True) + add_argument(description, 'description', False) + "<action>block</action>" params = { 'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + url_filter_name + "']", 'element': element, 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_create_url_filter_command(): """ Create a URL Filter """ url_filter_name = demisto.args()['name'] action = demisto.args()['action'] url_category_list = argToList(demisto.args()['url_category']) override_allow_list = argToList(demisto.args().get('override_allow_list')) override_block_list = argToList(demisto.args().get('override_block_list')) description = demisto.args().get('description') result = panorama_create_url_filter(url_filter_name, action, url_category_list, override_allow_list, override_block_list, description) url_filter_output = {'Name': url_filter_name} if DEVICE_GROUP: url_filter_output['DeviceGroup'] = DEVICE_GROUP url_filter_output['Category'] = [] for category in url_category_list: url_filter_output['Category'].append({ 'Name': category, 'Action': action }) if override_allow_list: url_filter_output['OverrideAllowList'] = override_allow_list if override_block_list: url_filter_output['OverrideBlockList'] = override_block_list if description: url_filter_output['Description'] = description demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'URL Filter was created successfully.', 'EntryContext': { "Panorama.URLFilter(val.Name == obj.Name)": url_filter_output } }) @logger def panorama_edit_url_filter(url_filter_name, element_to_change, element_value, add_remove_element=None): url_filter_prev = panorama_get_url_filter(url_filter_name) if '@dirtyId' in url_filter_prev: raise Exception('Please commit the instance prior to editing the URL Filter.') url_filter_output = {'Name': url_filter_name} if DEVICE_GROUP: url_filter_output['DeviceGroup'] = DEVICE_GROUP params = { 'action': 'edit', 'type': 'config', 'key': API_KEY, } if element_to_change == 'description': params['xpath'] = XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + url_filter_name + "']/" + element_to_change params['element'] = add_argument_open(element_value, 'description', False) result = http_request(URL, 'POST', body=params) url_filter_output['Description'] = element_value elif element_to_change == 'override_allow_list': prev_override_allow_list = argToList(url_filter_prev['allow-list']['member']) if add_remove_element == 'add': new_override_allow_list = list((set(prev_override_allow_list)).union(set([element_value]))) else: new_override_allow_list = [url for url in prev_override_allow_list if url != element_value] params['xpath'] = XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + url_filter_name + "']/allow-list" params['element'] = add_argument_list(new_override_allow_list, 'allow-list', True) result = http_request(URL, 'POST', body=params) url_filter_output[element_to_change] = new_override_allow_list # element_to_change == 'override_block_list' else: prev_override_block_list = 
argToList(url_filter_prev['block-list']['member']) if add_remove_element == 'add': new_override_block_list = list((set(prev_override_block_list)).union(set([element_value]))) else: new_override_block_list = [url for url in prev_override_block_list if url != element_value] params['xpath'] = XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + url_filter_name + "']/block-list" params['element'] = add_argument_list(new_override_block_list, 'block-list', True) result = http_request(URL, 'POST', body=params) url_filter_output[element_to_change] = new_override_block_list return result, url_filter_output def panorama_edit_url_filter_command(): """ Edit a URL Filter """ url_filter_name = demisto.args()['name'] element_to_change = demisto.args()['element_to_change'] add_remove_element = demisto.args()['add_remove_element'] element_value = demisto.args()['element_value'] result, url_filter_output = panorama_edit_url_filter(url_filter_name, element_to_change, element_value, add_remove_element) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'URL Filter was edited successfully.', 'EntryContext': { "Panorama.URLFilter(val.Name == obj.Name)": url_filter_output } }) @logger def panorama_delete_url_filter(url_filter_name): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + url_filter_name + "']", 'element': "<entry name='" + url_filter_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_url_filter_command(): """ Delete a custom url category """ url_filter_name = demisto.args()['name'] result = panorama_delete_url_filter(url_filter_name) url_filter_output = {'Name': url_filter_name} if DEVICE_GROUP: url_filter_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'URL Filter was deleted successfully.', 'EntryContext': { "Panorama.URLFilter(val.Name == obj.Name)": url_filter_output } }) ''' Security Rules Managing ''' def prettify_rule(rule): pretty_rule = { 'Name': rule['@name'], 'Action': rule['action'] } if DEVICE_GROUP: pretty_rule['DeviceGroup'] = DEVICE_GROUP if '@loc' in rule: pretty_rule['Location'] = rule['@loc'] if 'category' in rule and 'member' in rule['category']: pretty_rule['CustomUrlCategory'] = rule['category']['member'] if 'application' in rule and 'member' in rule['application']: pretty_rule['Application'] = rule['application']['member'] if 'destination' in rule and 'member' in rule['destination']: pretty_rule['Destination'] = rule['destination']['member'] if 'from' in rule and 'member' in rule['from']: pretty_rule['From'] = rule['from']['member'] if 'service' in rule and 'member' in rule['service']: pretty_rule['Service'] = rule['service']['member'] if 'to' in rule and 'member' in rule['to']: pretty_rule['To'] = rule['to']['member'] if 'source' in rule and 'member' in rule['source']: pretty_rule['Source'] = rule['source']['member'] if 'tag' in rule and 'member' in rule['tag']: pretty_rule['Tags'] = rule['tag']['member'] if 'log-setting' in rule and '#text' in rule['log-setting']: pretty_rule['LogForwardingProfile'] = rule['log-setting']['#text'] return pretty_rule def prettify_rules(rules): if not isinstance(rules, list): return prettify_rule(rules) pretty_rules_arr = [] for rule in rules: 
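        # The API returns a bare dict rather than a one-element list when only a
        # single rule exists; that case is handled by the isinstance check above,
        # so each item here is exactly one rule entry.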
pretty_rule = prettify_rule(rule) pretty_rules_arr.append(pretty_rule) return pretty_rules_arr @logger def panorama_list_rules(xpath: str, tag: str = None): params = { 'action': 'get', 'type': 'config', 'xpath': xpath, 'key': API_KEY } if tag: params['xpath'] += f'[( tag/member = \'{tag}\')]' result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_list_rules_command(): """ List security rules """ if DEVICE_GROUP: if 'pre_post' not in demisto.args(): raise Exception('Please provide the pre_post argument when listing rules in Panorama instance.') else: xpath = XPATH_SECURITY_RULES + demisto.args()['pre_post'] + '/security/rules/entry' else: xpath = XPATH_SECURITY_RULES tag = demisto.args().get('tag') rules = panorama_list_rules(xpath, tag) pretty_rules = prettify_rules(rules) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': rules, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Security Rules:', pretty_rules, ['Name', 'Location', 'Action', 'From', 'To', 'CustomUrlCategory', 'Service', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.SecurityRule(val.Name == obj.Name)": pretty_rules } }) @logger def panorama_move_rule_command(): """ Move a security rule """ rulename = demisto.args()['rulename'] params = { 'type': 'config', 'action': 'move', 'key': API_KEY, 'where': demisto.args()['where'], } if DEVICE_GROUP: if 'pre_post' not in demisto.args(): raise Exception('Please provide the pre_post argument when moving a rule in Panorama instance.') else: params['xpath'] = XPATH_SECURITY_RULES + demisto.args()[ 'pre_post'] + '/security/rules/entry' + '[@name=\'' + rulename + '\']' else: params['xpath'] = XPATH_SECURITY_RULES + '[@name=\'' + rulename + '\']' if 'dst' in demisto.args(): params['dst'] = demisto.args()['dst'] result = http_request(URL, 'POST', body=params) rule_output = {'Name': rulename} if DEVICE_GROUP: rule_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Rule ' + rulename + ' moved successfully.', 'EntryContext': { "Panorama.SecurityRule(val.Name == obj.Name)": rule_output } }) ''' Security Rule Configuration ''' @logger def panorama_create_rule_command(): """ Create a security rule """ rulename = demisto.args()['rulename'] if 'rulename' in demisto.args() else ('demisto-' + (str(uuid.uuid4()))[:8]) source = argToList(demisto.args().get('source')) destination = argToList(demisto.args().get('destination')) negate_source = demisto.args().get('negate_source') negate_destination = demisto.args().get('negate_destination') action = demisto.args().get('action') service = demisto.args().get('service') disable = demisto.args().get('disable') categories = argToList(demisto.args().get('category')) application = argToList(demisto.args().get('application')) source_user = demisto.args().get('source_user') disable_server_response_inspection = demisto.args().get('disable_server_response_inspection') description = demisto.args().get('description') target = demisto.args().get('target') log_forwarding = demisto.args().get('log_forwarding', None) tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None if not DEVICE_GROUP: if target: raise Exception('The target argument is relevant only for a Palo Alto Panorama instance.') elif log_forwarding: raise Exception('The log_forwarding argument is relevant 
only for a Palo Alto Panorama instance.')

    params = prepare_security_rule_params(api_action='set', rulename=rulename, source=source, destination=destination,
                                          negate_source=negate_source, negate_destination=negate_destination,
                                          action=action, service=service, disable=disable, application=application,
                                          source_user=source_user,
                                          disable_server_response_inspection=disable_server_response_inspection,
                                          description=description, target=target, log_forwarding=log_forwarding,
                                          tags=tags, category=categories)
    result = http_request(
        URL,
        'POST',
        body=params
    )

    rule_output = {SECURITY_RULE_ARGS[key]: value for key, value in demisto.args().items() if key in SECURITY_RULE_ARGS}
    rule_output['Name'] = rulename
    if DEVICE_GROUP:
        rule_output['DeviceGroup'] = DEVICE_GROUP

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Rule configured successfully.',
        'EntryContext': {
            "Panorama.SecurityRule(val.Name == obj.Name)": rule_output
        }
    })


@logger
def panorama_get_current_element(element_to_change: str, xpath: str) -> list:
    """
    Get the current element value from the given xpath
    """
    params = {
        'type': 'config',
        'action': 'get',
        'xpath': xpath,
        'key': API_KEY
    }
    try:
        response = http_request(URL, 'GET', params=params)
    except PAN_OS_Not_Found:
        return []

    result = response.get('response').get('result')
    if '@dirtyId' in result:
        raise Exception('Please commit the instance prior to editing the Security rule.')
    current_object = result.get(element_to_change)
    current_objects_items = []  # default, so a malformed response cannot leave the name unbound
    if 'list' in current_object:
        current_objects_items = argToList(current_object['list']['member'])
    elif 'member' in current_object:
        current_objects_items = argToList(current_object.get('member'))

    return current_objects_items


@logger
def panorama_edit_rule_items(rulename: str, element_to_change: str, element_value: List[str], behaviour: str):
    listable_elements = ['source', 'destination', 'application', 'category', 'source-user', 'service', 'tag']
    if element_to_change not in listable_elements:
        raise Exception(f'Adding objects is only available for the following Objects types: {listable_elements}')
    if element_to_change == 'target' and not DEVICE_GROUP:
        raise Exception('The target argument is relevant only for a Palo Alto Panorama instance.')

    params = {
        'type': 'config',
        'action': 'edit',
        'key': API_KEY
    }

    if DEVICE_GROUP:
        if 'pre_post' not in demisto.args():
            raise Exception('please provide the pre_post argument when editing a rule in Panorama instance.')
        else:
            params['xpath'] = XPATH_SECURITY_RULES + demisto.args()[
                'pre_post'] + '/security/rules/entry' + '[@name=\'' + rulename + '\']'
    else:
        params['xpath'] = XPATH_SECURITY_RULES + '[@name=\'' + rulename + '\']'
    params['xpath'] += '/' + element_to_change

    current_objects_items = panorama_get_current_element(element_to_change, params['xpath'])
    if behaviour == 'add':
        values = list((set(current_objects_items)).union(set(element_value)))
    else:  # remove
        values = [item for item in current_objects_items if item not in element_value]
        if not values:
            raise Exception(f'The object: {element_to_change} must have at least one item.')

    params['element'] = add_argument_list(values, element_to_change, True)
    result = http_request(URL, 'POST', body=params)
    rule_output = {
        'Name': rulename,
        SECURITY_RULE_ARGS[element_to_change]: values
    }
    if DEVICE_GROUP:
        rule_output['DeviceGroup'] = DEVICE_GROUP

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Rule edited
@logger
def panorama_edit_rule_command():
    """
    Edit a security rule
    """
    rulename = demisto.args()['rulename']
    element_to_change = demisto.args()['element_to_change']
    if element_to_change == 'log-forwarding':
        element_to_change = 'log-setting'
    element_value = demisto.args()['element_value']

    if element_to_change == 'target' and not DEVICE_GROUP:
        raise Exception('The target argument is relevant only for a Palo Alto Panorama instance.')

    behaviour = demisto.args().get('behaviour') if 'behaviour' in demisto.args() else 'replace'
    if behaviour != 'replace':
        panorama_edit_rule_items(rulename, element_to_change, argToList(element_value), behaviour)
    else:
        params = {
            'type': 'config',
            'action': 'edit',
            'key': API_KEY
        }
        if element_to_change in ['action', 'description', 'log-setting']:
            params['element'] = add_argument_open(element_value, element_to_change, False)
        elif element_to_change in ['source', 'destination', 'application', 'category', 'source-user', 'service', 'tag']:
            element_value = argToList(element_value)
            params['element'] = add_argument_list(element_value, element_to_change, True)
        elif element_to_change == 'target':
            params['element'] = add_argument_target(element_value, 'target')
        else:
            params['element'] = add_argument_yes_no(element_value, element_to_change)

        if DEVICE_GROUP:
            if 'pre_post' not in demisto.args():
                raise Exception('Please provide the pre_post argument when editing a rule in Panorama instance.')
            else:
                params['xpath'] = XPATH_SECURITY_RULES + demisto.args()[
                    'pre_post'] + '/security/rules/entry' + '[@name=\'' + rulename + '\']'
        else:
            params['xpath'] = XPATH_SECURITY_RULES + '[@name=\'' + rulename + '\']'
        params['xpath'] += '/' + element_to_change

        result = http_request(URL, 'POST', body=params)
        rule_output = {
            'Name': rulename,
            SECURITY_RULE_ARGS[element_to_change]: element_value
        }
        if DEVICE_GROUP:
            rule_output['DeviceGroup'] = DEVICE_GROUP

        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': result,
            'ReadableContentsFormat': formats['text'],
            'HumanReadable': 'Rule edited successfully.',
            'EntryContext': {
                "Panorama.SecurityRule(val.Name == obj.Name)": rule_output
            }
        })


@logger
def panorama_delete_rule_command():
    """
    Delete a security rule
    """
    rulename = demisto.args()['rulename']

    params = {
        'type': 'config',
        'action': 'delete',
        'key': API_KEY
    }
    if DEVICE_GROUP:
        if 'pre_post' not in demisto.args():
            raise Exception('Please provide the pre_post argument when deleting a rule in Panorama instance.')
        else:
            params['xpath'] = XPATH_SECURITY_RULES + demisto.args()[
                'pre_post'] + '/security/rules/entry' + '[@name=\'' + rulename + '\']'
    else:
        params['xpath'] = XPATH_SECURITY_RULES + '[@name=\'' + rulename + '\']'

    result = http_request(
        URL,
        'POST',
        body=params
    )

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Rule deleted successfully.',
    })
@logger
def panorama_custom_block_rule_command():
    """
    Block an object in Panorama
    """
    object_type = demisto.args()['object_type']
    object_value = argToList(demisto.args()['object_value'])
    direction = demisto.args()['direction'] if 'direction' in demisto.args() else 'both'
    rulename = demisto.args()['rulename'] if 'rulename' in demisto.args() else ('demisto-' + (str(uuid.uuid4()))[:8])
    block_destination = False if direction == 'from' else True
    block_source = False if direction == 'to' else True
    target = argToList(demisto.args().get('target')) if 'target' in demisto.args() else None
    log_forwarding = demisto.args().get('log_forwarding', None)
    tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None

    if not DEVICE_GROUP:
        if target:
            raise Exception('The target argument is relevant only for a Palo Alto Panorama instance.')
        elif log_forwarding:
            raise Exception('The log_forwarding argument is relevant only for a Palo Alto Panorama instance.')

    custom_block_output = {
        'Name': rulename,
        'Direction': direction,
        'Disabled': False
    }
    if DEVICE_GROUP:
        custom_block_output['DeviceGroup'] = DEVICE_GROUP
    if log_forwarding:
        custom_block_output['LogForwarding'] = log_forwarding
    if target:
        custom_block_output['Target'] = target
    if tags:
        custom_block_output['Tags'] = tags

    if object_type == 'ip':
        if block_source:
            params = prepare_security_rule_params(api_action='set', action='drop', source=object_value,
                                                  destination=['any'], rulename=rulename + '-from', target=target,
                                                  log_forwarding=log_forwarding, tags=tags)
            result = http_request(URL, 'POST', body=params)
        if block_destination:
            params = prepare_security_rule_params(api_action='set', action='drop', destination=object_value,
                                                  source=['any'], rulename=rulename + '-to', target=target,
                                                  log_forwarding=log_forwarding, tags=tags)
            result = http_request(URL, 'POST', body=params)
        custom_block_output['IP'] = object_value

    elif object_type in ['address-group', 'edl']:
        if block_source:
            params = prepare_security_rule_params(api_action='set', action='drop', source=object_value,
                                                  destination=['any'], rulename=rulename + '-from', target=target,
                                                  log_forwarding=log_forwarding, tags=tags)
            result = http_request(URL, 'POST', body=params)
        if block_destination:
            params = prepare_security_rule_params(api_action='set', action='drop', destination=object_value,
                                                  source=['any'], rulename=rulename + '-to', target=target,
                                                  log_forwarding=log_forwarding, tags=tags)
            result = http_request(URL, 'POST', body=params)
        custom_block_output['AddressGroup'] = object_value

    elif object_type == 'url-category':
        params = prepare_security_rule_params(api_action='set', action='drop', source=['any'], destination=['any'],
                                              category=object_value, rulename=rulename, target=target,
                                              log_forwarding=log_forwarding, tags=tags)
        result = http_request(URL, 'POST', body=params)
        custom_block_output['CustomURLCategory'] = object_value

    elif object_type == 'application':
        params = prepare_security_rule_params(api_action='set', action='drop', source=['any'], destination=['any'],
                                              application=object_value, rulename=rulename, target=target,
                                              log_forwarding=log_forwarding, tags=tags)
        result = http_request(URL, 'POST', body=params)
        custom_block_output['Application'] = object_value

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Object was blocked successfully.',
        'EntryContext': {
            "Panorama.SecurityRule(val.Name == obj.Name)": custom_block_output
        }
    })


''' PCAPS '''
@logger
def panorama_list_pcaps_command():
    """
    Get list of pcap files
    """
    if DEVICE_GROUP:
        raise Exception('PCAP listing is only supported on Firewall (not Panorama).')
    pcap_type = demisto.args()['pcapType']
    params = {
        'type': 'export',
        'key': API_KEY,
        'category': pcap_type
    }

    if 'password' in demisto.args():
        params['dlp-password'] = demisto.args()['password']
    elif demisto.args()['pcapType'] == 'dlp-pcap':
        raise Exception('Can not provide dlp-pcap without password.')

    result = http_request(URL, 'GET', params=params)
    json_result = json.loads(xml2json(result.text))['response']
    if json_result['@status'] != 'success':
        raise Exception('Request to get list of Pcaps Failed.\nStatus code: ' + str(
            json_result['response']['@code']) + '\nWith message: ' + str(json_result['response']['msg']['line']))

    dir_listing = json_result['result']['dir-listing']
    if 'file' not in dir_listing:
        demisto.results(f'PAN-OS has no Pcaps of type: {pcap_type}.')
    else:
        pcaps = dir_listing['file']
        pcap_list = [pcap[1:] for pcap in pcaps]
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': json_result,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('List of Pcaps:', pcap_list, ['Pcap name']),
            'EntryContext': {
                "Panorama.Pcaps(val.Name == obj.Name)": pcap_list
            }
        })


def validate_search_time(search_time: str) -> str:
    """
    Validate search_time is of format YYYY/MM/DD HH:MM:SS or YYYY/MM/DD and pad with zeroes
    """
    try:
        datetime.strptime(search_time, '%Y/%m/%d')
        search_time += ' 00:00:00'
        return search_time
    except ValueError:
        pass
    try:
        datetime.strptime(search_time, '%Y/%m/%d %H:%M:%S')
        return search_time
    except ValueError as err:
        raise ValueError(f"Incorrect data format. searchTime should be of: YYYY/MM/DD HH:MM:SS or YYYY/MM/DD.\n"
                         f"Error is: {str(err)}")
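# Hedged usage sketch (not part of the integration): how validate_search_time normalizes input,
# assuming only the two formats named in its docstring are accepted.
def _example_validate_search_time():
    assert validate_search_time('2021/01/08') == '2021/01/08 00:00:00'  # date-only input is padded to midnight
    assert validate_search_time('2021/01/08 14:30:00') == '2021/01/08 14:30:00'  # full timestamp passes through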
@logger
def panorama_get_pcap_command():
    """
    Get pcap file
    """
    if DEVICE_GROUP:
        raise Exception('Getting a PCAP file is only supported on Firewall (not Panorama).')
    pcap_type = demisto.args()['pcapType']
    params = {
        'type': 'export',
        'key': API_KEY,
        'category': pcap_type
    }

    password = demisto.args().get('password')
    pcap_id = demisto.args().get('pcapID')
    search_time = demisto.args().get('searchTime')

    if pcap_type == 'dlp-pcap' and not password:
        raise Exception('Can not provide dlp-pcap without password.')
    else:
        params['dlp-password'] = password
    if pcap_type == 'threat-pcap' and (not pcap_id or not search_time):
        raise Exception('Can not provide threat-pcap without pcap-id and the searchTime arguments.')

    pcap_name = demisto.args().get('from')
    local_name = demisto.args().get('localName')
    serial_no = demisto.args().get('serialNo')

    file_name = None
    if pcap_id:
        params['pcap-id'] = pcap_id
    if pcap_name:
        params['from'] = pcap_name
        file_name = pcap_name
    if local_name:
        params['to'] = local_name
        file_name = local_name
    if serial_no:
        params['serialno'] = serial_no
    if search_time:
        search_time = validate_search_time(search_time)
        params['search-time'] = search_time

    # set file name to the current time if from/to were not specified
    if not file_name:
        file_name = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')

    result = http_request(URL, 'GET', params=params)

    # due to the pcap file size limitation in the product; for more details, please see the documentation.
    if result.headers['Content-Type'] != 'application/octet-stream':
        raise Exception(
            'PCAP download failed. Most likely cause is the file size limitation.\n'
            'For information on how to download manually, see the documentation for this integration.')

    file = fileResult(file_name + ".pcap", result.content)
    demisto.results(file)


''' Applications '''


def prettify_applications_arr(applications_arr):
    pretty_application_arr = []
    if not isinstance(applications_arr, list):
        applications_arr = [applications_arr]
    for application in applications_arr:
        pretty_application_arr.append({
            'SubCategory': application.get('subcategory'),
            'Risk': application.get('risk'),
            'Technology': application.get('technology'),
            'Name': application.get('@name'),
            'Description': application.get('description'),
            'Id': application.get('@id'),
        })
    return pretty_application_arr


@logger
def panorama_list_applications(predefined: bool):
    major_version = get_pan_os_major_version()
    params = {
        'type': 'config',
        'action': 'get',
        'key': API_KEY
    }
    if predefined:
        if major_version < 9:
            raise Exception('Listing predefined applications is only available for PAN-OS 9.X and above versions.')
        else:
            params['xpath'] = '/config/predefined/application'
    else:
        params['xpath'] = XPATH_OBJECTS + "application/entry"

    result = http_request(
        URL,
        'POST',
        body=params
    )
    applications = result['response']['result']
    if predefined:
        application_arr = applications.get('application', {}).get('entry')
    else:
        if major_version < 9:
            application_arr = applications.get('entry')
        else:
            application_arr = applications.get('application')

    return application_arr


def panorama_list_applications_command():
    """
    List all applications
    """
    predefined = str(demisto.args().get('predefined', '')) == 'true'
    applications_arr = panorama_list_applications(predefined)
    applications_arr_output = prettify_applications_arr(applications_arr)
    headers = ['Id', 'Name', 'Risk', 'Category', 'SubCategory', 'Technology', 'Description']
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': applications_arr,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Applications', t=applications_arr_output, headers=headers),
        'EntryContext': {
            "Panorama.Applications(val.Name == obj.Name)": applications_arr_output
        }
    })


''' External Dynamic Lists Commands '''
def prettify_edls_arr(edls_arr):
    pretty_edls_arr = []
    if not isinstance(edls_arr, list):  # handle case of only one edl in the instance
        return prettify_edl(edls_arr)
    for edl in edls_arr:
        pretty_edl = {
            'Name': edl['@name'],
            'Type': ''.join(edl['type'].keys())
        }
        edl_type = pretty_edl['Type']

        if edl['type'][edl_type]:
            if 'url' in edl['type'][edl_type]:
                pretty_edl['URL'] = edl['type'][edl_type]['url']
            if 'certificate-profile' in edl['type'][edl_type]:
                pretty_edl['CertificateProfile'] = edl['type'][edl_type]['certificate-profile']
            if 'recurring' in edl['type'][edl_type]:
                pretty_edl['Recurring'] = ''.join(edl['type'][edl_type]['recurring'].keys())
            if 'description' in edl['type'][edl_type]:
                pretty_edl['Description'] = edl['type'][edl_type]['description']

        if DEVICE_GROUP:
            pretty_edl['DeviceGroup'] = DEVICE_GROUP

        pretty_edls_arr.append(pretty_edl)

    return pretty_edls_arr


@logger
def panorama_list_edls():
    params = {
        'action': 'get',
        'type': 'config',
        'xpath': XPATH_OBJECTS + "external-list/entry",
        'key': API_KEY
    }
    result = http_request(
        URL,
        'GET',
        params=params,
    )
    return result['response']['result']['entry']


def panorama_list_edls_command():
    """
    Get all EDLs
    """
    edls_arr = panorama_list_edls()
    edls_output = prettify_edls_arr(edls_arr)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': edls_arr,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('External Dynamic Lists:', edls_output,
                                         ['Name', 'Type', 'URL', 'Recurring', 'CertificateProfile', 'Description'],
                                         removeNull=True),
        'EntryContext': {
            "Panorama.EDL(val.Name == obj.Name)": edls_output
        }
    })


def prettify_edl(edl):
    pretty_edl = {
        'Name': edl['@name'],
        'Type': ''.join(edl['type'].keys())
    }
    edl_type = pretty_edl['Type']

    if edl['type'][edl_type]:
        if 'url' in edl['type'][edl_type]:
            pretty_edl['URL'] = edl['type'][edl_type]['url']
        if 'certificate-profile' in edl['type'][edl_type]:
            pretty_edl['CertificateProfile'] = edl['type'][edl_type]['certificate-profile']
        if 'recurring' in edl['type'][edl_type]:
            pretty_edl['Recurring'] = ''.join(edl['type'][edl_type]['recurring'].keys())
        if 'description' in edl['type'][edl_type]:
            pretty_edl['Description'] = edl['type'][edl_type]['description']

    if DEVICE_GROUP:
        pretty_edl['DeviceGroup'] = DEVICE_GROUP

    return pretty_edl


@logger
def panorama_get_edl(edl_name):
    params = {
        'action': 'show',
        'type': 'config',
        'xpath': XPATH_OBJECTS + "external-list/entry[@name='" + edl_name + "']",
        'key': API_KEY
    }
    result = http_request(
        URL,
        'GET',
        params=params,
    )
    return result['response']['result']['entry']


def panorama_get_edl_command():
    """
    Get an EDL
    """
    edl_name = demisto.args()['name']
    edl = panorama_get_edl(edl_name)
    edl_output = prettify_edl(edl)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': edl,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('External Dynamic List:', edl_output,
                                         ['Name', 'Type', 'URL', 'Recurring', 'CertificateProfile', 'Description'],
                                         None, True),
        'EntryContext': {
            "Panorama.EDL(val.Name == obj.Name)": edl_output
        }
    })


@logger
def panorama_create_edl(edl_name, url, type_, recurring, certificate_profile=None, description=None):
    params = {
        'action': 'set',
        'type': 'config',
        'xpath': XPATH_OBJECTS + "external-list/entry[@name='" + edl_name + "']/type/" + type_,
        'key': API_KEY
    }

    params['element'] = (add_argument(url, 'url', False)
                         + '<recurring><' + recurring + '/></recurring>'
                         + add_argument(certificate_profile, 'certificate-profile', False)
                         + add_argument(description, 'description', False))

    result = http_request(
        URL,
        'POST',
        body=params,
    )
    return result


def panorama_create_edl_command():
    """
    Create an edl object
    """
    edl_name = demisto.args().get('name')
    url = demisto.args().get('url').replace(' ', '%20')
    type_ = demisto.args().get('type')
    recurring = demisto.args().get('recurring')
    certificate_profile = demisto.args().get('certificate_profile')
    description = demisto.args().get('description')

    edl = panorama_create_edl(edl_name, url, type_, recurring, certificate_profile, description)

    edl_output = {
        'Name': edl_name,
        'URL': url,
        'Type': type_,
        'Recurring': recurring
    }
    if DEVICE_GROUP:
        edl_output['DeviceGroup'] = DEVICE_GROUP
    if description:
        edl_output['Description'] = description
    if certificate_profile:
        edl_output['CertificateProfile'] = certificate_profile

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': edl,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'External Dynamic List was created successfully.',
        'EntryContext': {
            "Panorama.EDL(val.Name == obj.Name)": edl_output
        }
    })
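# Illustrative sketch (assumption: add_argument(value, tag, ...) renders '<tag>value</tag>' and skips
# None values; that helper is defined elsewhere in this module). For
#     panorama_create_edl('my-edl', 'http://example.com/list.txt', 'ip', 'hourly')
# the 'element' body would then look like:
#     <url>http://example.com/list.txt</url><recurring><hourly/></recurring>
# nested under the xpath .../external-list/entry[@name='my-edl']/type/ip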
@logger
def panorama_edit_edl(edl_name, element_to_change, element_value):
    edl_prev = panorama_get_edl(edl_name)
    if '@dirtyId' in edl_prev:
        raise Exception('Please commit the instance prior to editing the External Dynamic List')
    edl_type = ''.join(edl_prev['type'].keys())
    edl_output = {'Name': edl_name}
    if DEVICE_GROUP:
        edl_output['DeviceGroup'] = DEVICE_GROUP
    params = {'action': 'edit', 'type': 'config', 'key': API_KEY,
              'xpath': XPATH_OBJECTS + "external-list/entry[@name='" + edl_name + "']/type/" + edl_type + "/"
                       + element_to_change}

    if element_to_change == 'url':
        params['element'] = add_argument_open(element_value, 'url', False)
        result = http_request(URL, 'POST', body=params)
        edl_output['URL'] = element_value

    elif element_to_change == 'certificate_profile':
        params['element'] = add_argument_open(element_value, 'certificate-profile', False)
        result = http_request(URL, 'POST', body=params)
        edl_output['CertificateProfile'] = element_value

    elif element_to_change == 'description':
        params['element'] = add_argument_open(element_value, 'description', False)
        result = http_request(URL, 'POST', body=params)
        edl_output['Description'] = element_value

    else:  # element_to_change == 'recurring'
        if element_value not in ['five-minute', 'hourly']:
            raise Exception('Recurring segment must be five-minute or hourly')
        params['element'] = '<recurring><' + element_value + '/></recurring>'
        result = http_request(URL, 'POST', body=params)
        edl_output['Recurring'] = element_value

    return result, edl_output


def panorama_edit_edl_command():
    """
    Edit an EDL
    """
    edl_name = demisto.args()['name']
    element_to_change = demisto.args()['element_to_change']
    element_value = demisto.args()['element_value']

    result, edl_output = panorama_edit_edl(edl_name, element_to_change, element_value)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'External Dynamic List was edited successfully',
        'EntryContext': {
            "Panorama.EDL(val.Name == obj.Name)": edl_output
        }
    })


@logger
def panorama_delete_edl(edl_name):
    params = {
        'action': 'delete',
        'type': 'config',
        'xpath': XPATH_OBJECTS + "external-list/entry[@name='" + edl_name + "']",
        'element': "<entry name='" + edl_name + "'></entry>",
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params,
    )
    return result


def panorama_delete_edl_command():
    """
    Delete an EDL
    """
    edl_name = demisto.args()['name']

    edl = panorama_delete_edl(edl_name)
    edl_output = {'Name': edl_name}
    if DEVICE_GROUP:
        edl_output['DeviceGroup'] = DEVICE_GROUP

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': edl,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'External Dynamic List was deleted successfully',
        'EntryContext': {
            "Panorama.EDL(val.Name == obj.Name)": edl_output
        }
    })


def panorama_refresh_edl(edl_name):
    edl = panorama_get_edl(edl_name)
    edl_type = ''.join(edl['type'].keys())

    params = {
        'type': 'op',
        'cmd': '<request><system><external-list><refresh><type><' + edl_type + '><name>' + edl_name
               + '</name></' + edl_type + '></type></refresh></external-list></system></request>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params,
    )
    return result


def panorama_refresh_edl_command():
    """
    Refresh an EDL
    """
    if DEVICE_GROUP:
        raise Exception('EDL refresh is only supported on Firewall (not Panorama).')

    edl_name = demisto.args()['name']

    result = panorama_refresh_edl(edl_name)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Refreshed External Dynamic List successfully',
    })


''' IP Tags '''
@logger
def panorama_register_ip_tag(tag: str, ips: List, persistent: str):
    entry: str = ''
    for ip in ips:
        entry += f'<entry ip="{ip}" persistent="{persistent}"><tag><member>{tag}</member></tag></entry>'

    params = {
        'type': 'user-id',
        'cmd': '<uid-message><version>2.0</version><type>update</type><payload><register>' + entry
               + '</register></payload></uid-message>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params,
    )
    return result


def panorama_register_ip_tag_command():
    """
    Register IPs to a Tag
    """
    tag = demisto.args()['tag']
    ips = argToList(demisto.args()['IPs'])

    persistent = demisto.args()['persistent'] if 'persistent' in demisto.args() else 'true'
    persistent = '1' if persistent == 'true' else '0'

    result = panorama_register_ip_tag(tag, ips, str(persistent))

    registered_ip: Dict[str, str] = {}
    # update context only if IPs are persistent
    if persistent == '1':
        # get existing IPs for this tag
        context_ips = demisto.dt(demisto.context(), 'Panorama.DynamicTags(val.Tag =="' + tag + '").IPs')
        if context_ips:
            all_ips = ips + context_ips
        else:
            all_ips = ips
        registered_ip = {
            'Tag': tag,
            'IPs': all_ips
        }

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Registered ip-tag successfully',
        'EntryContext': {
            "Panorama.DynamicTags(val.Tag == obj.Tag)": registered_ip
        }
    })


@logger
def panorama_unregister_ip_tag(tag: str, ips: list):
    entry = ''
    for ip in ips:
        entry += '<entry ip="' + ip + '"><tag><member>' + tag + '</member></tag></entry>'

    params = {
        'type': 'user-id',
        'cmd': '<uid-message><version>2.0</version><type>update</type><payload><unregister>' + entry
               + '</unregister></payload></uid-message>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params,
    )
    return result


def panorama_unregister_ip_tag_command():
    """
    Unregister IPs from a Tag
    """
    tag = demisto.args()['tag']
    ips = argToList(demisto.args()['IPs'])

    result = panorama_unregister_ip_tag(tag, ips)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Unregistered ip-tag successfully'
    })


''' User Tags '''


@logger
def panorama_register_user_tag(tag: str, users: List):
    entry: str = ''
    for user in users:
        entry += f'<entry user="{user}"><tag><member>{tag}</member></tag></entry>'

    params = {
        'type': 'user-id',
        'cmd': f'<uid-message><version>2.0</version><type>update</type><payload><register-user>{entry}'
               f'</register-user></payload></uid-message>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params,
    )
    return result


def panorama_register_user_tag_command():
    """
    Register Users to a Tag
    """
    major_version = get_pan_os_major_version()
    if major_version <= 8:
        raise Exception('The panorama-register-user-tag command is only available for PAN-OS 9.X and above versions.')
    tag = demisto.args()['tag']
    users = argToList(demisto.args()['Users'])

    result = panorama_register_user_tag(tag, users)

    # get existing Users for this tag
    context_users = demisto.dt(demisto.context(), 'Panorama.DynamicTags(val.Tag =="' + tag + '").Users')
    if context_users:
        all_users = users + context_users
    else:
        all_users = users

    registered_user = {
        'Tag': tag,
        'Users': all_users
    }

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Registered user-tag successfully',
        'EntryContext': {
            "Panorama.DynamicTags(val.Tag == obj.Tag)": registered_user
        }
    })
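# Illustrative sketch (not from the integration): the register helpers above post a PAN-OS user-id API
# 'uid-message' document. For panorama_register_ip_tag('malicious', ['1.1.1.1'], persistent='1') the cmd is:
#     <uid-message><version>2.0</version><type>update</type><payload><register>
#         <entry ip="1.1.1.1" persistent="1"><tag><member>malicious</member></tag></entry>
#     </register></payload></uid-message>
# (whitespace added here for readability only; the code builds it as a single line)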
@logger
def panorama_unregister_user_tag(tag: str, users: list):
    entry = ''
    for user in users:
        entry += f'<entry user="{user}"><tag><member>{tag}</member></tag></entry>'

    params = {
        'type': 'user-id',
        'cmd': f'<uid-message><version>2.0</version><type>update</type><payload><unregister-user>{entry}'
               f'</unregister-user></payload></uid-message>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params,
    )
    return result


def panorama_unregister_user_tag_command():
    """
    Unregister Users from a Tag
    """
    major_version = get_pan_os_major_version()
    if major_version <= 8:
        raise Exception('The panorama-unregister-user-tag command is only available for PAN-OS 9.X and above versions.')
    tag = demisto.args()['tag']
    users = argToList(demisto.args()['Users'])

    result = panorama_unregister_user_tag(tag, users)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Unregistered user-tag successfully'
    })


''' Traffic Logs '''


def build_traffic_logs_query(source=None, destination=None, receive_time=None,
                             application=None, to_port=None, action=None):
    query = ''
    if source and len(source) > 0:
        query += '(addr.src in ' + source + ')'
    if destination and len(destination) > 0:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        # note: this previously concatenated `source` by mistake; the destination filter must use `destination`
        query += '(addr.dst in ' + destination + ')'
    if receive_time and len(receive_time) > 0:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += '(receive_time geq ' + receive_time + ')'
    if application and len(application) > 0:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += '(app eq ' + application + ')'
    if to_port and len(to_port) > 0:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += '(port.dst eq ' + to_port + ')'
    if action and len(action) > 0:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += '(action eq ' + action + ')'
    return query
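# Hedged sketch (not part of the integration): what build_traffic_logs_query produces once the
# destination filter uses the destination argument, as fixed above.
def _example_build_traffic_logs_query():
    query = build_traffic_logs_query(source='10.0.0.1', destination='8.8.8.8', action='allow')
    assert query == '(addr.src in 10.0.0.1) and (addr.dst in 8.8.8.8) and (action eq allow)'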
@logger
def panorama_query_traffic_logs(number_of_logs, direction, query,
                                source, destination, receive_time, application, to_port, action):
    params = {
        'type': 'log',
        'log-type': 'traffic',
        'key': API_KEY
    }

    if query and len(query) > 0:
        params['query'] = query
    else:
        params['query'] = build_traffic_logs_query(source, destination, receive_time, application, to_port, action)
    if number_of_logs:
        params['nlogs'] = number_of_logs
    if direction:
        params['dir'] = direction
    result = http_request(
        URL,
        'GET',
        params=params,
    )
    return result


def panorama_query_traffic_logs_command():
    """
    Query the traffic logs
    """
    number_of_logs = demisto.args().get('number_of_logs')
    direction = demisto.args().get('direction')
    query = demisto.args().get('query')
    source = demisto.args().get('source')
    destination = demisto.args().get('destination')
    receive_time = demisto.args().get('receive_time')
    application = demisto.args().get('application')
    to_port = demisto.args().get('to_port')
    action = demisto.args().get('action')

    if query and (source or destination or receive_time or application or to_port or action):
        raise Exception('Use the query argument or the '
                        'source, destination, receive_time, application, to_port, action arguments to build your query')

    result = panorama_query_traffic_logs(number_of_logs, direction, query,
                                         source, destination, receive_time, application, to_port, action)

    if result['response']['@status'] == 'error':
        if 'msg' in result['response'] and 'line' in result['response']['msg']:
            message = '. Reason is: ' + result['response']['msg']['line']
            raise Exception('Query traffic logs failed' + message)
        else:
            raise Exception('Query traffic logs failed.')

    if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
        raise Exception('Missing JobID in response.')
    query_traffic_output = {
        'JobID': result['response']['result']['job'],
        'Status': 'Pending'
    }

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Query Traffic Logs:', query_traffic_output, ['JobID', 'Status'],
                                         removeNull=True),
        'EntryContext': {"Panorama.TrafficLogs(val.JobID == obj.JobID)": query_traffic_output}
    })


@logger
def panorama_get_traffic_logs(job_id):
    params = {
        'action': 'get',
        'type': 'log',
        'job-id': job_id,
        'key': API_KEY
    }
    result = http_request(
        URL,
        'GET',
        params=params,
    )
    return result


def panorama_check_traffic_logs_status_command():
    job_id = demisto.args().get('job_id')
    result = panorama_get_traffic_logs(job_id)

    if result['response']['@status'] == 'error':
        if 'msg' in result['response'] and 'line' in result['response']['msg']:
            message = '. Reason is: ' + result['response']['msg']['line']
            raise Exception('Query traffic logs failed' + message)
        else:
            raise Exception('Query traffic logs failed.')

    query_traffic_status_output = {
        'JobID': job_id,
        'Status': 'Pending'
    }

    if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \
            or 'status' not in result['response']['result']['job']:
        raise Exception('Missing JobID status in response.')
    if result['response']['result']['job']['status'] == 'FIN':
        query_traffic_status_output['Status'] = 'Completed'

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Query Traffic Logs status:', query_traffic_status_output,
                                         ['JobID', 'Status'], removeNull=True),
        'EntryContext': {"Panorama.TrafficLogs(val.JobID == obj.JobID)": query_traffic_status_output}
    })
def prettify_traffic_logs(traffic_logs):
    pretty_traffic_logs_arr = []
    for traffic_log in traffic_logs:
        pretty_traffic_log = {}
        if 'action' in traffic_log:
            pretty_traffic_log['Action'] = traffic_log['action']
        if 'action_source' in traffic_log:
            pretty_traffic_log['ActionSource'] = traffic_log['action_source']
        if 'application' in traffic_log:
            pretty_traffic_log['Application'] = traffic_log['application']
        if 'category' in traffic_log:
            pretty_traffic_log['Category'] = traffic_log['category']
        if 'device_name' in traffic_log:
            pretty_traffic_log['DeviceName'] = traffic_log['device_name']
        if 'dst' in traffic_log:
            pretty_traffic_log['Destination'] = traffic_log['dst']
        if 'dport' in traffic_log:
            pretty_traffic_log['DestinationPort'] = traffic_log['dport']
        if 'from' in traffic_log:
            pretty_traffic_log['FromZone'] = traffic_log['from']
        if 'proto' in traffic_log:
            pretty_traffic_log['Protocol'] = traffic_log['proto']
        if 'rule' in traffic_log:
            pretty_traffic_log['Rule'] = traffic_log['rule']
        if 'receive_time' in traffic_log:
            pretty_traffic_log['ReceiveTime'] = traffic_log['receive_time']
        if 'session_end_reason' in traffic_log:
            pretty_traffic_log['SessionEndReason'] = traffic_log['session_end_reason']
        if 'src' in traffic_log:
            pretty_traffic_log['Source'] = traffic_log['src']
        if 'sport' in traffic_log:
            pretty_traffic_log['SourcePort'] = traffic_log['sport']
        if 'start' in traffic_log:
            pretty_traffic_log['StartTime'] = traffic_log['start']
        if 'to' in traffic_log:
            pretty_traffic_log['ToZone'] = traffic_log['to']
        pretty_traffic_logs_arr.append(pretty_traffic_log)
    return pretty_traffic_logs_arr


def panorama_get_traffic_logs_command():
    job_id = demisto.args().get('job_id')
    result = panorama_get_traffic_logs(job_id)

    if result['response']['@status'] == 'error':
        if 'msg' in result['response'] and 'line' in result['response']['msg']:
            message = '. Reason is: ' + result['response']['msg']['line']
            raise Exception('Query traffic logs failed' + message)
        else:
            raise Exception('Query traffic logs failed.')

    query_traffic_logs_output = {
        'JobID': job_id,
        'Status': 'Pending'
    }

    if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \
            or 'status' not in result['response']['result']['job']:
        raise Exception('Missing JobID status in response.')

    if result['response']['result']['job']['status'] != 'FIN':
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': result,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Query Traffic Logs status:', query_traffic_logs_output,
                                             ['JobID', 'Status'], removeNull=True),
            'EntryContext': {"Panorama.TrafficLogs(val.JobID == obj.JobID)": query_traffic_logs_output}
        })
    else:  # FIN
        query_traffic_logs_output['Status'] = 'Completed'
        if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response']['result'] \
                or 'logs' not in result['response']['result']['log']:
            raise Exception('Missing logs in response.')

        logs = result['response']['result']['log']['logs']
        if logs['@count'] == '0':
            demisto.results('No traffic logs matched the query')
        else:
            pretty_traffic_logs = prettify_traffic_logs(logs['entry'])
            query_traffic_logs_output['Logs'] = pretty_traffic_logs
            demisto.results({
                'Type': entryTypes['note'],
                'ContentsFormat': formats['json'],
                'Contents': result,
                'ReadableContentsFormat': formats['markdown'],
                'HumanReadable': tableToMarkdown('Query Traffic Logs:', pretty_traffic_logs,
                                                 ['JobID', 'Source', 'SourcePort', 'Destination', 'DestinationPort',
                                                  'Application', 'Action'], removeNull=True),
                'EntryContext': {"Panorama.TrafficLogs(val.JobID == obj.JobID)": query_traffic_logs_output}
            })


''' Logs '''
def build_array_query(query, arg_string, string, operator):
    list_string = argToList(arg_string)
    list_string_length = len(list_string)

    if list_string_length > 1:
        query += '('

    for i, item in enumerate(list_string):
        query += f'({string} {operator} \'{item}\')'
        if i < list_string_length - 1:
            query += ' or '

    if list_string_length > 1:
        query += ')'

    return query


def build_logs_query(address_src=None, address_dst=None, ip_=None,
                     zone_src=None, zone_dst=None, time_generated=None, action=None,
                     port_dst=None, rule=None, url=None, filedigest=None):
    query = ''
    if address_src:
        query += build_array_query(query, address_src, 'addr.src', 'in')
    if address_dst:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += build_array_query(query, address_dst, 'addr.dst', 'in')
    if ip_:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query = build_array_query(query, ip_, 'addr.src', 'in')
        query += ' or '
        query = build_array_query(query, ip_, 'addr.dst', 'in')
    if zone_src:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += build_array_query(query, zone_src, 'zone.src', 'eq')
    if zone_dst:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += build_array_query(query, zone_dst, 'zone.dst', 'eq')
    if port_dst:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += build_array_query(query, port_dst, 'port.dst', 'eq')
    if time_generated:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += '(time_generated leq ' + time_generated + ')'
    if action:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += build_array_query(query, action, 'action', 'eq')
    if rule:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += build_array_query(query, rule, 'rule', 'eq')
    if url:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += build_array_query(query, url, 'url', 'contains')
    if filedigest:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += build_array_query(query, filedigest, 'filedigest', 'eq')

    return query
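# Hedged sketch (not part of the integration): build_array_query ORs multiple comma-separated values
# and wraps them in one parenthesized group, assuming argToList splits on commas.
def _example_build_array_query():
    single = build_array_query('', '1.2.3.4', 'addr.src', 'in')
    multiple = build_array_query('', '1.2.3.4,5.6.7.8', 'addr.src', 'in')
    assert single == "(addr.src in '1.2.3.4')"
    assert multiple == "((addr.src in '1.2.3.4') or (addr.src in '5.6.7.8'))"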
@logger
def panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
                        zone_src, zone_dst, time_generated, action,
                        port_dst, rule, url, filedigest):
    params = {
        'type': 'log',
        'log-type': log_type,
        'key': API_KEY
    }

    if filedigest and log_type != 'wildfire':
        raise Exception('The filedigest argument is only relevant to wildfire log type.')
    if url and log_type == 'traffic':
        raise Exception('The url argument is not relevant to traffic log type.')

    if query:
        params['query'] = query
    else:
        if ip_ and (address_src or address_dst):
            raise Exception(
                'The ip argument cannot be used with the address-source or the address-destination arguments.')
        params['query'] = build_logs_query(address_src, address_dst, ip_,
                                           zone_src, zone_dst, time_generated, action,
                                           port_dst, rule, url, filedigest)
    if number_of_logs:
        params['nlogs'] = number_of_logs

    result = http_request(
        URL,
        'GET',
        params=params,
    )

    return result


def panorama_query_logs_command():
    """
    Query logs
    """
    log_type = demisto.args().get('log-type')
    number_of_logs = demisto.args().get('number_of_logs')
    query = demisto.args().get('query')
    address_src = demisto.args().get('addr-src')
    address_dst = demisto.args().get('addr-dst')
    ip_ = demisto.args().get('ip')
    zone_src = demisto.args().get('zone-src')
    zone_dst = demisto.args().get('zone-dst')
    time_generated = demisto.args().get('time-generated')
    action = demisto.args().get('action')
    port_dst = demisto.args().get('port-dst')
    rule = demisto.args().get('rule')
    filedigest = demisto.args().get('filedigest')
    url = demisto.args().get('url')
    if url and url[-1] != '/':
        url += '/'

    if query and (address_src or address_dst or zone_src or zone_dst
                  or time_generated or action or port_dst or rule or url or filedigest):
        raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.')

    result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
                                 zone_src, zone_dst, time_generated, action,
                                 port_dst, rule, url, filedigest)

    if result['response']['@status'] == 'error':
        if 'msg' in result['response'] and 'line' in result['response']['msg']:
            message = '. Reason is: ' + result['response']['msg']['line']
            raise Exception('Query logs failed' + message)
        else:
            raise Exception('Query logs failed.')

    if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
        raise Exception('Missing JobID in response.')

    query_logs_output = {
        'JobID': result['response']['result']['job'],
        'Status': 'Pending',
        'LogType': log_type,
        'Message': result['response']['result']['msg']['line']
    }

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True),
        'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output}
    })


def panorama_check_logs_status_command():
    """
    Check query logs status
    """
    job_ids = argToList(demisto.args().get('job_id'))
    for job_id in job_ids:
        result = panorama_get_traffic_logs(job_id)

        if result['response']['@status'] == 'error':
            if 'msg' in result['response'] and 'line' in result['response']['msg']:
                message = '. Reason is: ' + result['response']['msg']['line']
                raise Exception('Query logs failed' + message)
            else:
                raise Exception('Query logs failed.')

        query_logs_status_output = {
            'JobID': job_id,
            'Status': 'Pending'
        }

        if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \
                or 'status' not in result['response']['result']['job']:
            raise Exception('Missing JobID status in response.')
        if result['response']['result']['job']['status'] == 'FIN':
            query_logs_status_output['Status'] = 'Completed'

        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': result,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Query Logs status:', query_logs_status_output, ['JobID', 'Status'],
                                             removeNull=True),
            'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_status_output}
        })
def prettify_log(log):
    pretty_log = {}

    if 'action' in log:
        pretty_log['Action'] = log['action']
    if 'app' in log:
        pretty_log['Application'] = log['app']
    if 'category' in log:
        pretty_log['CategoryOrVerdict'] = log['category']
    if 'device_name' in log:
        pretty_log['DeviceName'] = log['device_name']
    if 'dst' in log:
        pretty_log['DestinationAddress'] = log['dst']
    if 'dstuser' in log:
        pretty_log['DestinationUser'] = log['dstuser']
    if 'dstloc' in log:
        pretty_log['DestinationCountry'] = log['dstloc']
    if 'dport' in log:
        pretty_log['DestinationPort'] = log['dport']
    if 'filedigest' in log:
        pretty_log['FileDigest'] = log['filedigest']
    if 'filename' in log:
        pretty_log['FileName'] = log['filename']
    if 'filetype' in log:
        pretty_log['FileType'] = log['filetype']
    if 'from' in log:
        pretty_log['FromZone'] = log['from']
    if 'misc' in log:
        pretty_log['URLOrFilename'] = log['misc']
    if 'natdst' in log:
        pretty_log['NATDestinationIP'] = log['natdst']
    if 'natdport' in log:
        pretty_log['NATDestinationPort'] = log['natdport']
    if 'natsrc' in log:
        pretty_log['NATSourceIP'] = log['natsrc']
    if 'natsport' in log:
        pretty_log['NATSourcePort'] = log['natsport']
    if 'pcap_id' in log:
        pretty_log['PCAPid'] = log['pcap_id']
    if 'proto' in log:
        pretty_log['IPProtocol'] = log['proto']
    if 'recipient' in log:
        pretty_log['Recipient'] = log['recipient']
    if 'rule' in log:
        pretty_log['Rule'] = log['rule']
    if 'rule_uuid' in log:
        pretty_log['RuleID'] = log['rule_uuid']
    if 'receive_time' in log:
        pretty_log['ReceiveTime'] = log['receive_time']
    if 'sender' in log:
        pretty_log['Sender'] = log['sender']
    if 'sessionid' in log:
        pretty_log['SessionID'] = log['sessionid']
    if 'serial' in log:
        pretty_log['DeviceSN'] = log['serial']
    if 'severity' in log:
        pretty_log['Severity'] = log['severity']
    if 'src' in log:
        pretty_log['SourceAddress'] = log['src']
    if 'srcloc' in log:
        pretty_log['SourceCountry'] = log['srcloc']
    if 'srcuser' in log:
        pretty_log['SourceUser'] = log['srcuser']
    if 'sport' in log:
        pretty_log['SourcePort'] = log['sport']
    if 'thr_category' in log:
        pretty_log['ThreatCategory'] = log['thr_category']
    if 'threatid' in log:
        pretty_log['Name'] = log['threatid']
    if 'tid' in log:
        pretty_log['ID'] = log['tid']
    if 'to' in log:
        pretty_log['ToZone'] = log['to']
    if 'time_generated' in log:
        pretty_log['TimeGenerated'] = log['time_generated']
    if 'url_category_list' in log:
        pretty_log['URLCategoryList'] = log['url_category_list']

    return pretty_log


def prettify_logs(logs):
    if not isinstance(logs, list):  # handle case of only one log that matched the query
        return prettify_log(logs)
    pretty_logs_arr = []
    for log in logs:
        pretty_log = prettify_log(log)
        pretty_logs_arr.append(pretty_log)
    return pretty_logs_arr
def panorama_get_logs_command():
    ignore_auto_extract = demisto.args().get('ignore_auto_extract') == 'true'
    job_ids = argToList(demisto.args().get('job_id'))
    for job_id in job_ids:
        result = panorama_get_traffic_logs(job_id)
        log_type_dt = demisto.dt(demisto.context(), f'Panorama.Monitor(val.JobID === "{job_id}").LogType')
        if isinstance(log_type_dt, list):
            log_type = log_type_dt[0]
        else:
            log_type = log_type_dt

        if result['response']['@status'] == 'error':
            if 'msg' in result['response'] and 'line' in result['response']['msg']:
                message = '. Reason is: ' + result['response']['msg']['line']
                raise Exception('Query logs failed' + message)
            else:
                raise Exception('Query logs failed.')

        query_logs_output = {
            'JobID': job_id,
            'Status': 'Pending'
        }

        if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \
                or 'status' not in result['response']['result']['job']:
            raise Exception('Missing JobID status in response.')

        if result['response']['result']['job']['status'] != 'FIN':
            demisto.results({
                'Type': entryTypes['note'],
                'ContentsFormat': formats['json'],
                'Contents': result,
                'ReadableContentsFormat': formats['markdown'],
                'HumanReadable': tableToMarkdown('Query Logs status:', query_logs_output, ['JobID', 'Status'],
                                                 removeNull=True),
                'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output}
            })
        else:  # FIN
            query_logs_output['Status'] = 'Completed'
            if 'response' not in result or 'result' not in result['response'] \
                    or 'log' not in result['response']['result'] \
                    or 'logs' not in result['response']['result']['log']:
                raise Exception('Missing logs in response.')

            logs = result['response']['result']['log']['logs']
            if logs['@count'] == '0':
                human_readable = f'No {log_type} logs matched the query.'
            else:
                pretty_logs = prettify_logs(logs['entry'])
                query_logs_output['Logs'] = pretty_logs
                human_readable = tableToMarkdown('Query ' + log_type + ' Logs:', query_logs_output['Logs'],
                                                 ['TimeGenerated', 'SourceAddress', 'DestinationAddress',
                                                  'Application', 'Action', 'Rule', 'URLOrFilename'],
                                                 removeNull=True)
            demisto.results({
                'Type': entryTypes['note'],
                'ContentsFormat': formats['json'],
                'Contents': result,
                'ReadableContentsFormat': formats['markdown'],
                'HumanReadable': human_readable,
                'IgnoreAutoExtract': ignore_auto_extract,
                'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output}
            })


''' Security Policy Match'''


def build_policy_match_query(application=None, category=None,
                             destination=None, destination_port=None, from_=None, to_=None,
                             protocol=None, source=None, source_user=None):
    query = '<test><security-policy-match>'
    if from_:
        query += f'<from>{from_}</from>'
    if to_:
        query += f'<to>{to_}</to>'
    if source:
        query += f'<source>{source}</source>'
    if destination:
        query += f'<destination>{destination}</destination>'
    if destination_port:
        query += f'<destination-port>{destination_port}</destination-port>'
    if protocol:
        query += f'<protocol>{protocol}</protocol>'
    if source_user:
        query += f'<source-user>{source_user}</source-user>'
    if application:
        query += f'<application>{application}</application>'
    if category:
        query += f'<category>{category}</category>'
    query += '</security-policy-match></test>'
    return query
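# Hedged sketch (not part of the integration): the XML document build_policy_match_query assembles for
# the PAN-OS 'test security-policy-match' operational command.
def _example_build_policy_match_query():
    query = build_policy_match_query(source='10.0.0.1', destination='8.8.8.8', protocol='6')
    assert query == ('<test><security-policy-match><source>10.0.0.1</source>'
                     '<destination>8.8.8.8</destination><protocol>6</protocol>'
                     '</security-policy-match></test>')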
def panorama_security_policy_match(application=None, category=None,
                                   destination=None, destination_port=None, from_=None, to_=None,
                                   protocol=None, source=None, source_user=None):
    params = {'type': 'op', 'key': API_KEY,
              'cmd': build_policy_match_query(application, category, destination, destination_port,
                                              from_, to_, protocol, source, source_user)}

    result = http_request(
        URL,
        'GET',
        params=params
    )

    return result['response']['result']


def prettify_matching_rule(matching_rule):
    pretty_matching_rule = {}

    if '@name' in matching_rule:
        pretty_matching_rule['Name'] = matching_rule['@name']
    if 'from' in matching_rule:
        pretty_matching_rule['From'] = matching_rule['from']
    if 'source' in matching_rule:
        pretty_matching_rule['Source'] = matching_rule['source']
    if 'to' in matching_rule:
        pretty_matching_rule['To'] = matching_rule['to']
    if 'destination' in matching_rule:
        pretty_matching_rule['Destination'] = matching_rule['destination']
    if 'category' in matching_rule:
        pretty_matching_rule['Category'] = matching_rule['category']
    if 'action' in matching_rule:
        pretty_matching_rule['Action'] = matching_rule['action']

    return pretty_matching_rule


def prettify_matching_rules(matching_rules):
    if not isinstance(matching_rules, list):  # handle case of only one rule that matched the query
        return prettify_matching_rule(matching_rules)
    pretty_matching_rules_arr = []
    for matching_rule in matching_rules:
        pretty_matching_rule = prettify_matching_rule(matching_rule)
        pretty_matching_rules_arr.append(pretty_matching_rule)

    return pretty_matching_rules_arr


def prettify_query_fields(application=None, category=None,
                          destination=None, destination_port=None, from_=None, to_=None,
                          protocol=None, source=None, source_user=None):
    pretty_query_fields = {'Source': source, 'Destination': destination, 'Protocol': protocol}
    if application:
        pretty_query_fields['Application'] = application
    if category:
        pretty_query_fields['Category'] = category
    if destination_port:
        pretty_query_fields['DestinationPort'] = destination_port
    if from_:
        pretty_query_fields['From'] = from_
    if to_:
        pretty_query_fields['To'] = to_
    if source_user:
        pretty_query_fields['SourceUser'] = source_user
    return pretty_query_fields


def panorama_security_policy_match_command():
    if not VSYS:
        raise Exception("The 'panorama-security-policy-match' command is only relevant for a Firewall instance.")

    application = demisto.args().get('application')
    category = demisto.args().get('category')
    destination = demisto.args().get('destination')
    destination_port = demisto.args().get('destination-port')
    from_ = demisto.args().get('from')
    to_ = demisto.args().get('to')
    protocol = demisto.args().get('protocol')
    source = demisto.args().get('source')
    source_user = demisto.args().get('source-user')

    matching_rules = panorama_security_policy_match(application, category, destination, destination_port, from_, to_,
                                                    protocol, source, source_user)
    if not matching_rules:
        demisto.results('The query did not match a Security policy.')
    else:
        ec_ = {'Rules': prettify_matching_rules(matching_rules['rules']['entry']),
               'QueryFields': prettify_query_fields(application, category, destination, destination_port,
                                                    from_, to_, protocol, source, source_user),
               'Query': build_policy_match_query(application, category, destination, destination_port,
                                                 from_, to_, protocol, source, source_user)}
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': matching_rules,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Matching Security Policies:', ec_['Rules'],
                                             ['Name', 'Action', 'From', 'To', 'Source', 'Destination', 'Application'],
                                             removeNull=True),
            'EntryContext': {"Panorama.SecurityPolicyMatch(val.Query == obj.Query)": ec_}
        })


''' Static Routes'''


def prettify_static_route(static_route: Dict, virtual_router: str, template: Optional[str] = None) -> Dict[str, str]:
    pretty_static_route: Dict = {}

    if '@name' in static_route:
        pretty_static_route['Name'] = static_route['@name']
    if 'bfd' in static_route and 'profile' in static_route['bfd']:
        pretty_static_route['BFDprofile'] = static_route['bfd']['profile']
    if 'destination' in static_route:
        if '@dirtyId' in static_route['destination']:
            pretty_static_route['Uncommitted'] = True
        else:
            pretty_static_route['Destination'] = static_route['destination']
    if 'metric' in static_route:
        pretty_static_route['Metric'] = int(static_route['metric'])
    if 'nexthop' in static_route:
        # note: this previously inspected static_route['destination'] for the dirty flag;
        # the nexthop entry is the one being rendered here
        if '@dirtyId' in static_route['nexthop']:
            pretty_static_route['Uncommitted'] = True
        else:
            nexthop: Dict[str, str] = static_route['nexthop']
            if 'ip-address' in nexthop:
                pretty_static_route['NextHop'] = nexthop['ip-address']
            elif 'next-vr' in static_route['nexthop']:
                pretty_static_route['NextHop'] = nexthop['next-vr']
            elif 'fqdn' in static_route['nexthop']:
                pretty_static_route['NextHop'] = nexthop['fqdn']
            elif 'discard' in static_route['nexthop']:
                pretty_static_route['NextHop'] = nexthop['discard']
    if 'route-table' in static_route:
        route_table = static_route['route-table']
        if 'unicast' in route_table:
            pretty_static_route['RouteTable'] = 'Unicast'
        elif 'multicast' in route_table:
            pretty_static_route['RouteTable'] = 'Multicast'
        elif 'both' in route_table:
            pretty_static_route['RouteTable'] = 'Both'
        else:  # route table is no-install
            pretty_static_route['RouteTable'] = 'No install'
    pretty_static_route['VirtualRouter'] = virtual_router
    if template:
        pretty_static_route['Template'] = template

    return pretty_static_route
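# Hedged sketch (not part of the integration): how prettify_static_route flattens one API entry,
# assuming the XML-to-dict shape shown ('@name' attributes, nested 'nexthop' dict).
def _example_prettify_static_route():
    entry = {'@name': 'default-route', 'destination': '0.0.0.0/0', 'metric': '10',
             'nexthop': {'ip-address': '192.168.1.1'}}
    pretty = prettify_static_route(entry, virtual_router='vr1', template='tpl1')
    assert pretty == {'Name': 'default-route', 'Destination': '0.0.0.0/0', 'Metric': 10,
                      'NextHop': '192.168.1.1', 'VirtualRouter': 'vr1', 'Template': 'tpl1'}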
def prettify_static_routes(static_routes, virtual_router: str, template: Optional[str] = None):
    if not isinstance(static_routes, list):  # handle case of only one static route in a virtual router
        return prettify_static_route(static_routes, virtual_router, template)

    pretty_static_route_arr = []
    for static_route in static_routes:
        pretty_static_route = prettify_static_route(static_route, virtual_router, template)
        pretty_static_route_arr.append(pretty_static_route)

    return pretty_static_route_arr


@logger
def panorama_list_static_routes(xpath_network: str, virtual_router: str, show_uncommitted: str) -> Dict[str, str]:
    action = 'get' if show_uncommitted else 'show'
    params = {
        'action': action,
        'type': 'config',
        'xpath': f'{xpath_network}/virtual-router/entry[@name=\'{virtual_router}\']/routing-table/ip/static-route',
        'key': API_KEY
    }
    result = http_request(URL, 'GET', params=params)
    return result['response']['result']


def panorama_list_static_routes_command():
    """
    List all static routes of a virtual Router
    """
    template = demisto.args().get('template')
    xpath_network, template = set_xpath_network(template)
    virtual_router = demisto.args()['virtual_router']
    show_uncommitted = demisto.args().get('show_uncommitted') == 'true'
    virtual_router_object = panorama_list_static_routes(xpath_network, virtual_router, show_uncommitted)

    if 'static-route' not in virtual_router_object or 'entry' not in virtual_router_object['static-route']:
        human_readable = 'The Virtual Router does not exist or has no static routes configured.'
        static_routes = virtual_router_object
    else:
        static_routes = prettify_static_routes(virtual_router_object['static-route']['entry'],
                                               virtual_router, template)
        table_header = f'Displaying all Static Routes for the Virtual Router: {virtual_router}'
        headers = ['Name', 'Destination', 'NextHop', 'Uncommitted', 'RouteTable', 'Metric', 'BFDprofile']
        human_readable = tableToMarkdown(name=table_header, t=static_routes, headers=headers, removeNull=True)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': virtual_router_object,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': human_readable,
        'EntryContext': {"Panorama.StaticRoutes(val.Name == obj.Name)": static_routes}
    })


@logger
def panorama_get_static_route(xpath_network: str, virtual_router: str, static_route_name: str) -> Dict[str, str]:
    params = {
        'action': 'get',
        'type': 'config',
        'xpath': f'{xpath_network}/virtual-router/entry[@name=\'{virtual_router}\']/routing-table/ip/'
                 f'static-route/entry[@name=\'{static_route_name}\']',
        'key': API_KEY
    }
    result = http_request(URL, 'GET', params=params)
    return result['response']['result']


def panorama_get_static_route_command():
    """
    Get a static route of a virtual router
    """
    template = demisto.args().get('template')
    xpath_network, template = set_xpath_network(template)
    virtual_router = demisto.args()['virtual_router']
    static_route_name = demisto.args()['static_route']
    static_route_object = panorama_get_static_route(xpath_network, virtual_router, static_route_name)
    if '@count' in static_route_object and int(static_route_object['@count']) < 1:
        raise Exception('Static route does not exist.')
    static_route = prettify_static_route(static_route_object['entry'], virtual_router, template)
    table_header = f'Static route: {static_route_name}'

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': static_route_object,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(name=table_header, t=static_route, removeNull=True),
        'EntryContext': {
            "Panorama.StaticRoutes(val.Name == obj.Name)": static_route
        }
    })
@logger
def panorama_add_static_route(xpath_network: str, virtual_router: str, static_route_name: str, destination: str,
                              nexthop_type: str, nexthop_value: str, interface: str = None,
                              metric: str = None) -> Dict[str, str]:
    params = {
        'action': 'set',
        'type': 'config',
        'key': API_KEY,
        'xpath': f'{xpath_network}/virtual-router/entry[@name=\'{virtual_router}\']/'
                 f'routing-table/ip/static-route/entry[@name=\'{static_route_name}\']',
        'element': f'<destination>{destination}</destination>'
                   f'<nexthop><{nexthop_type}>{nexthop_value}</{nexthop_type}></nexthop>'
    }
    if interface:
        params['element'] += f'<interface>{interface}</interface>'
    if metric:
        params['element'] += f'<metric>{metric}</metric>'

    result = http_request(URL, 'GET', params=params)
    return result['response']


def panorama_add_static_route_command():
    """
    Add a Static Route
    """
    template = demisto.args().get('template')
    xpath_network, template = set_xpath_network(template)
    virtual_router = demisto.args().get('virtual_router')
    static_route_name = demisto.args().get('static_route')
    destination = demisto.args().get('destination')
    nexthop_type = demisto.args().get('nexthop_type')
    nexthop_value = demisto.args().get('nexthop_value')
    interface = demisto.args().get('interface', None)
    metric = demisto.args().get('metric', None)

    if nexthop_type == 'fqdn':
        # Only from PAN-OS 9.x, creating a static route based on FQDN nexthop is available.
        major_version = get_pan_os_major_version()

        if major_version <= 8:
            raise Exception('Next Hop of type FQDN is only available for PAN-OS 9.x instances.')
    static_route = panorama_add_static_route(xpath_network, virtual_router, static_route_name, destination,
                                             nexthop_type, nexthop_value, interface, metric)
    human_readable = f'New uncommitted static route {static_route_name} configuration added.'
    entry_context = {
        'Name': static_route_name,
        'VirtualRouter': virtual_router,
        'Destination': destination,
        'NextHop': nexthop_value,
    }
    if interface:
        entry_context['Interface'] = interface
    if metric:
        entry_context['Metric'] = metric
    if template:
        entry_context['Template'] = template

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': static_route,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': human_readable,
        'EntryContext': {"Panorama.StaticRoutes(val.Name == obj.Name)": static_route}
    })


@logger
def panorama_delete_static_route(xpath_network: str, virtual_router: str, route_name: str) -> Dict[str, str]:
    params = {
        'action': 'delete',
        'type': 'config',
        'xpath': f'{xpath_network}/virtual-router/entry[@name=\'{virtual_router}\']/'
                 f'routing-table/ip/static-route/entry[@name=\'{route_name}\']',
        'key': API_KEY
    }
    result = http_request(URL, 'DELETE', params=params)
    return result


def panorama_delete_static_route_command():
    """
    Delete a Static Route
    """
    template = demisto.args().get('template')
    xpath_network, template = set_xpath_network(template)
    virtual_router = demisto.args()['virtual_router']
    route_name = demisto.args()['route_name']
    deleted_static_route = panorama_delete_static_route(xpath_network, virtual_router, route_name)
    entry_context = {
        'Name': route_name,
        'Deleted': True
    }
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': deleted_static_route,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': f'The static route: {route_name} was deleted. Changes are not committed.',
        'EntryContext': {"Panorama.StaticRoutes(val.Name == obj.Name)": entry_context}  # add key -> deleted: true
    })
Changes are not committed.', 'EntryContext': {"Panorama.StaticRoutes(val.Name == obj.Name)": entry_context} # add key -> deleted: true }) def panorama_show_device_version(target: str = None): params = { 'type': 'op', 'cmd': '<show><system><info/></system></show>', 'key': API_KEY } if target: params['target'] = target result = http_request( URL, 'GET', params=params ) return result['response']['result']['system'] def panorama_show_device_version_command(): """ Get device details and show message in war room """ target = str(demisto.args()['target']) if 'target' in demisto.args() else None response = panorama_show_device_version(target) info_data = { 'Devicename': response['devicename'], 'Model': response['model'], 'Serial': response['serial'], 'Version': response['sw-version'] } entry_context = {"Panorama.Device.Info(val.Devicename === obj.Devicename)": info_data} headers = ['Devicename', 'Model', 'Serial', 'Version'] human_readable = tableToMarkdown('Device Version:', info_data, headers=headers, removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': response, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': entry_context }) @logger def panorama_download_latest_content_update_content(target: str): params = { 'type': 'op', 'target': target, 'cmd': '<request><content><upgrade><download><latest/></download></upgrade></content></request>', 'key': API_KEY } result = http_request( URL, 'POST', body=params ) return result def panorama_download_latest_content_update_command(): """ Download content and show message in war room """ if DEVICE_GROUP: raise Exception('Download latest content is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None result = panorama_download_latest_content_update_content(target) if 'result' in result['response']: # download has been given a jobid download_status_output = { 'JobID': result['response']['result']['job'], 'Status': 'Pending' } entry_context = {"Panorama.Content.Download(val.JobID == obj.JobID)": download_status_output} human_readable = tableToMarkdown('Content download:', download_status_output, ['JobID', 'Status'], removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': entry_context }) else: # no download took place demisto.results(result['response']['msg']) @logger def panorama_content_update_download_status(target: str, job_id: str): params = { 'type': 'op', 'cmd': f'<show><jobs><id>{job_id}</id></jobs></show>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def panorama_content_update_download_status_command(): """ Check jobID of content update download status """ if DEVICE_GROUP: raise Exception('Content download status is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None job_id = demisto.args()['job_id'] result = panorama_content_update_download_status(target, job_id) content_download_status = { 'JobID': result['response']['result']['job']['id'] } if result['response']['result']['job']['status'] == 'FIN': if result['response']['result']['job']['result'] == 'OK': content_download_status['Status'] = 'Completed' else: content_download_status['Status'] = 'Failed' content_download_status['Details'] = 
result['response']['result']['job'] if result['response']['result']['job']['status'] == 'PEND': content_download_status['Status'] = 'Pending' entry_context = {"Panorama.Content.Download(val.JobID == obj.JobID)": content_download_status} human_readable = tableToMarkdown('Content download status:', content_download_status, ['JobID', 'Status', 'Details'], removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': entry_context }) @logger def panorama_install_latest_content_update(target: str): params = { 'type': 'op', 'cmd': '<request><content><upgrade><install><version>latest</version></install></upgrade></content></request>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def panorama_install_latest_content_update_command(): """ Install the latest content update """ if DEVICE_GROUP: raise Exception('Content update installation is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None result = panorama_install_latest_content_update(target) if 'result' in result['response']: # installation has been given a jobid content_install_info = { 'JobID': result['response']['result']['job'], 'Status': 'Pending' } entry_context = {"Panorama.Content.Install(val.JobID == obj.JobID)": content_install_info} human_readable = tableToMarkdown('Result:', content_install_info, ['JobID', 'Status'], removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': entry_context }) else: # no content install took place demisto.results(result['response']['msg']) @logger def panorama_content_update_install_status(target: str, job_id: str): params = { 'type': 'op', 'cmd': f'<show><jobs><id>{job_id}</id></jobs></show>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def panorama_content_update_install_status_command(): """ Check jobID of content update install status """ if DEVICE_GROUP: raise Exception('Content install status is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None job_id = demisto.args()['job_id'] result = panorama_content_update_install_status(target, job_id) content_install_status = { 'JobID': result['response']['result']['job']['id'] } if result['response']['result']['job']['status'] == 'FIN': if result['response']['result']['job']['result'] == 'OK': content_install_status['Status'] = 'Completed' else: # result['response']['job']['result'] == 'FAIL' content_install_status['Status'] = 'Failed' content_install_status['Details'] = result['response']['result']['job'] if result['response']['result']['job']['status'] == 'PEND': content_install_status['Status'] = 'Pending' entry_context = {"Panorama.Content.Install(val.JobID == obj.JobID)": content_install_status} human_readable = tableToMarkdown('Content install status:', content_install_status, ['JobID', 'Status', 'Details'], removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': entry_context }) def panorama_check_latest_panos_software_command(): if
DEVICE_GROUP: raise Exception('Checking latest PAN-OS version is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None params = { 'type': 'op', 'cmd': '<request><system><software><check></check></software></system></request>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) demisto.results(result['response']['result']) @logger def panorama_download_panos_version(target: str, target_version: str): params = { 'type': 'op', 'cmd': f'<request><system><software><download><version>{target_version}' f'</version></download></software></system></request>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def panorama_download_panos_version_command(): """ Check jobID of pan-os version download """ if DEVICE_GROUP: raise Exception('Downloading PAN-OS version is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None target_version = str(demisto.args()['target_version']) result = panorama_download_panos_version(target, target_version) if 'result' in result['response']: # download has been given a jobid panos_version_download = { 'JobID': result['response']['result']['job'] } entry_context = {"Panorama.PANOS.Download(val.JobID == obj.JobID)": panos_version_download} human_readable = tableToMarkdown('Result:', panos_version_download, ['JobID', 'Status'], removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': entry_context }) else: # no panos download took place demisto.results(result['response']['msg']) @logger def panorama_download_panos_status(target: str, job_id: str): params = { 'type': 'op', 'cmd': f'<show><jobs><id>{job_id}</id></jobs></show>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def panorama_download_panos_status_command(): """ Check jobID of panos download status """ if DEVICE_GROUP: raise Exception('PAN-OS version download status is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None job_id = demisto.args()['job_id'] result = panorama_download_panos_status(target, job_id) panos_download_status = { 'JobID': result['response']['result']['job']['id'] } if result['response']['result']['job']['status'] == 'FIN': if result['response']['result']['job']['result'] == 'OK': panos_download_status['Status'] = 'Completed' else: # result['response']['job']['result'] == 'FAIL' panos_download_status['Status'] = 'Failed' panos_download_status['Details'] = result['response']['result']['job'] if result['response']['result']['job']['status'] == 'PEND': panos_download_status['Status'] = 'Pending' human_readable = tableToMarkdown('PAN-OS download status:', panos_download_status, ['JobID', 'Status', 'Details'], removeNull=True) entry_context = {"Panorama.PANOS.Download(val.JobID == obj.JobID)": panos_download_status} demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': entry_context }) @logger def panorama_install_panos_version(target: str, target_version: str): params = { 'type': 'op', 'cmd': f'<request><system><software><install><version>{target_version}' 
'</version></install></software></system></request>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def panorama_install_panos_version_command(): """ Install target PAN-OS version """ if DEVICE_GROUP: raise Exception('PAN-OS installation is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None target_version = str(demisto.args()['target_version']) result = panorama_install_panos_version(target, target_version) if 'result' in result['response']: # panos install has been given a jobid panos_install = { 'JobID': result['response']['result']['job'] } entry_context = {"Panorama.PANOS.Install(val.JobID == obj.JobID)": panos_install} human_readable = tableToMarkdown('PAN-OS Installation:', panos_install, ['JobID', 'Status'], removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': entry_context }) else: # no panos install took place demisto.results(result['response']['msg']) @logger def panorama_install_panos_status(target: str, job_id: str): params = { 'type': 'op', 'cmd': f'<show><jobs><id>{job_id}</id></jobs></show>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def panorama_install_panos_status_command(): """ Check jobID of panos install status """ if DEVICE_GROUP: raise Exception('PAN-OS installation status is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None job_id = demisto.args()['job_id'] result = panorama_install_panos_status(target, job_id) panos_install_status = { 'JobID': result['response']['result']['job']['id'] } if result['response']['result']['job']['status'] == 'FIN': if result['response']['result']['job']['result'] == 'OK': panos_install_status['Status'] = 'Completed' else: # result['response']['job']['result'] == 'FAIL' panos_install_status['Status'] = 'Failed' panos_install_status['Details'] = result['response']['result']['job'] if result['response']['result']['job']['status'] == 'PEND': panos_install_status['Status'] = 'Pending' entry_context = {"Panorama.PANOS.Install(val.JobID == obj.JobID)": panos_install_status} human_readable = tableToMarkdown('PAN-OS installation status:', panos_install_status, ['JobID', 'Status', 'Details'], removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': entry_context }) def panorama_device_reboot_command(): if DEVICE_GROUP: raise Exception('Device reboot is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None params = { 'type': 'op', 'cmd': '<request><restart><system></system></restart></request>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) demisto.results(result['response']['result']) def main(): LOG(f'Command being called is: {demisto.command()}') try: # Remove proxy if not set to true in params handle_proxy() if demisto.command() == 'test-module': panorama_test() elif demisto.command() == 'panorama': panorama_command() elif demisto.command() == 'panorama-commit': panorama_commit_command() elif demisto.command() == 'panorama-commit-status':
panorama_commit_status_command() elif demisto.command() == 'panorama-push-to-device-group': panorama_push_to_device_group_command() elif demisto.command() == 'panorama-push-status': panorama_push_status_command() # Addresses commands elif demisto.command() == 'panorama-list-addresses': panorama_list_addresses_command() elif demisto.command() == 'panorama-get-address': panorama_get_address_command() elif demisto.command() == 'panorama-create-address': panorama_create_address_command() elif demisto.command() == 'panorama-delete-address': panorama_delete_address_command() # Address groups commands elif demisto.command() == 'panorama-list-address-groups': panorama_list_address_groups_command() elif demisto.command() == 'panorama-get-address-group': panorama_get_address_group_command() elif demisto.command() == 'panorama-create-address-group': panorama_create_address_group_command() elif demisto.command() == 'panorama-delete-address-group': panorama_delete_address_group_command() elif demisto.command() == 'panorama-edit-address-group': panorama_edit_address_group_command() # Services commands elif demisto.command() == 'panorama-list-services': panorama_list_services_command() elif demisto.command() == 'panorama-get-service': panorama_get_service_command() elif demisto.command() == 'panorama-create-service': panorama_create_service_command() elif demisto.command() == 'panorama-delete-service': panorama_delete_service_command() # Service groups commands elif demisto.command() == 'panorama-list-service-groups': panorama_list_service_groups_command() elif demisto.command() == 'panorama-get-service-group': panorama_get_service_group_command() elif demisto.command() == 'panorama-create-service-group': panorama_create_service_group_command() elif demisto.command() == 'panorama-delete-service-group': panorama_delete_service_group_command() elif demisto.command() == 'panorama-edit-service-group': panorama_edit_service_group_command() # Custom Url Category commands elif demisto.command() == 'panorama-get-custom-url-category': panorama_get_custom_url_category_command() elif demisto.command() == 'panorama-create-custom-url-category': panorama_create_custom_url_category_command() elif demisto.command() == 'panorama-delete-custom-url-category': panorama_delete_custom_url_category_command() elif demisto.command() == 'panorama-edit-custom-url-category': panorama_edit_custom_url_category_command() # URL Filtering capabilities elif demisto.command() == 'panorama-get-url-category': panorama_get_url_category_command(url_cmd='url') elif demisto.command() == 'panorama-get-url-category-from-cloud': panorama_get_url_category_command(url_cmd='url-info-cloud') elif demisto.command() == 'panorama-get-url-category-from-host': panorama_get_url_category_command(url_cmd='url-info-host') # URL Filter elif demisto.command() == 'panorama-get-url-filter': panorama_get_url_filter_command() elif demisto.command() == 'panorama-create-url-filter': panorama_create_url_filter_command() elif demisto.command() == 'panorama-edit-url-filter': panorama_edit_url_filter_command() elif demisto.command() == 'panorama-delete-url-filter': panorama_delete_url_filter_command() # EDL elif demisto.command() == 'panorama-list-edls': panorama_list_edls_command() elif demisto.command() == 'panorama-get-edl': panorama_get_edl_command() elif demisto.command() == 'panorama-create-edl': panorama_create_edl_command() elif demisto.command() == 'panorama-edit-edl': panorama_edit_edl_command() elif demisto.command() == 'panorama-delete-edl': 
panorama_delete_edl_command() elif demisto.command() == 'panorama-refresh-edl': panorama_refresh_edl_command() # Registered IPs elif demisto.command() == 'panorama-register-ip-tag': panorama_register_ip_tag_command() elif demisto.command() == 'panorama-unregister-ip-tag': panorama_unregister_ip_tag_command() # Registered Users elif demisto.command() == 'panorama-register-user-tag': panorama_register_user_tag_command() elif demisto.command() == 'panorama-unregister-user-tag': panorama_unregister_user_tag_command() # Security Rules Managing elif demisto.command() == 'panorama-list-rules': panorama_list_rules_command() elif demisto.command() == 'panorama-move-rule': panorama_move_rule_command() # Security Rules Configuration elif demisto.command() == 'panorama-create-rule': panorama_create_rule_command() elif demisto.command() == 'panorama-custom-block-rule': panorama_custom_block_rule_command() elif demisto.command() == 'panorama-edit-rule': panorama_edit_rule_command() elif demisto.command() == 'panorama-delete-rule': panorama_delete_rule_command() # Traffic Logs - deprecated elif demisto.command() == 'panorama-query-traffic-logs': panorama_query_traffic_logs_command() elif demisto.command() == 'panorama-check-traffic-logs-status': panorama_check_traffic_logs_status_command() elif demisto.command() == 'panorama-get-traffic-logs': panorama_get_traffic_logs_command() # Logs elif demisto.command() == 'panorama-query-logs': panorama_query_logs_command() elif demisto.command() == 'panorama-check-logs-status': panorama_check_logs_status_command() elif demisto.command() == 'panorama-get-logs': panorama_get_logs_command() # Pcaps elif demisto.command() == 'panorama-list-pcaps': panorama_list_pcaps_command() elif demisto.command() == 'panorama-get-pcap': panorama_get_pcap_command() # Application elif demisto.command() == 'panorama-list-applications': panorama_list_applications_command() # Test security policy match elif demisto.command() == 'panorama-security-policy-match': panorama_security_policy_match_command() # Static Routes elif demisto.command() == 'panorama-list-static-routes': panorama_list_static_routes_command() elif demisto.command() == 'panorama-get-static-route': panorama_get_static_route_command() elif demisto.command() == 'panorama-add-static-route': panorama_add_static_route_command() elif demisto.command() == 'panorama-delete-static-route': panorama_delete_static_route_command() # Firewall Upgrade # Check device software version elif demisto.command() == 'panorama-show-device-version': panorama_show_device_version_command() # Download the latest content update elif demisto.command() == 'panorama-download-latest-content-update': panorama_download_latest_content_update_command() # Download the latest content update elif demisto.command() == 'panorama-content-update-download-status': panorama_content_update_download_status_command() # Install the latest content update elif demisto.command() == 'panorama-install-latest-content-update': panorama_install_latest_content_update_command() # Content update install status elif demisto.command() == 'panorama-content-update-install-status': panorama_content_update_install_status_command() # Check PAN-OS latest software update elif demisto.command() == 'panorama-check-latest-panos-software': panorama_check_latest_panos_software_command() # Download target PAN-OS version elif demisto.command() == 'panorama-download-panos-version': panorama_download_panos_version_command() # PAN-OS download status elif demisto.command() == 
'panorama-download-panos-status': panorama_download_panos_status_command() # PAN-OS software install elif demisto.command() == 'panorama-install-panos-version': panorama_install_panos_version_command() # PAN-OS install status elif demisto.command() == 'panorama-install-panos-status': panorama_install_panos_status_command() # Reboot Panorama Device elif demisto.command() == 'panorama-device-reboot': panorama_device_reboot_command() else: raise NotImplementedError(f'Command {demisto.command()} was not implemented.') except Exception as err: return_error(str(err)) finally: LOG.print_log() if __name__ in ["__builtin__", "builtins", '__main__']: main()
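# --- Illustrative sketch (assumption: not part of the shipped integration) ---
# The *-status commands above all repeat one polling pattern: run
# <show><jobs><id>N</id></jobs></show>, then map the job's 'status' and
# 'result' fields onto Pending/Completed/Failed. A minimal helper capturing
# that shared logic could look like the sketch below; `summarize_job_status`
# is a hypothetical name and is not called anywhere in this integration.
def summarize_job_status(job: Dict) -> Dict[str, Any]:
    """Map a raw PAN-OS job dict onto the Status/Details shape used above."""
    summary: Dict[str, Any] = {'JobID': job.get('id')}
    if job.get('status') == 'FIN':
        # 'OK' means the job finished successfully; anything else failed.
        summary['Status'] = 'Completed' if job.get('result') == 'OK' else 'Failed'
        if summary['Status'] == 'Failed':
            summary['Details'] = job
    elif job.get('status') == 'PEND':
        summary['Status'] = 'Pending'
    return summary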
import demistomock as demisto from CommonServerPython import * from CommonServerUserPython import * ''' IMPORTS ''' from datetime import datetime from typing import Dict, List, Any, Optional, Tuple import uuid import json import requests # disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBALS ''' if not demisto.params().get('port'): return_error('Set a port for the instance') URL = demisto.params()['server'].rstrip('/:') + ':' + demisto.params().get('port') + '/api/' API_KEY = str(demisto.params().get('key')) USE_SSL = not demisto.params().get('insecure') # determine a vsys or a device-group VSYS = demisto.params().get('vsys') if demisto.args() and demisto.args().get('device-group', None): DEVICE_GROUP = demisto.args().get('device-group') else: DEVICE_GROUP = demisto.params().get('device_group', None) # configuration check if DEVICE_GROUP and VSYS: return_error('Cannot configure both vsys and Device group. Set vsys for firewall, set Device group for Panorama.') if not DEVICE_GROUP and not VSYS: return_error('Set vsys for firewall or Device group for Panorama.') # setting security xpath relevant to FW or panorama management if DEVICE_GROUP: device_group_shared = DEVICE_GROUP.lower() if device_group_shared == 'shared': XPATH_SECURITY_RULES = "/config/shared/" DEVICE_GROUP = device_group_shared else: XPATH_SECURITY_RULES = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/" else: XPATH_SECURITY_RULES = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/rulebase/security/rules/entry" # setting objects xpath relevant to FW or panorama management if DEVICE_GROUP: device_group_shared = DEVICE_GROUP.lower() if DEVICE_GROUP == 'shared': XPATH_OBJECTS = "/config/shared/" DEVICE_GROUP = device_group_shared else: XPATH_OBJECTS = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/" else: XPATH_OBJECTS = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/" # Security rule arguments for output handling SECURITY_RULE_ARGS = { 'rulename': 'Name', 'source': 'Source', 'destination': 'Destination', 'negate_source': 'NegateSource', 'negate_destination': 'NegateDestination', 'action': 'Action', 'service': 'Service', 'disable': 'Disabled', 'application': 'Application', 'source_user': 'SourceUser', 'disable_server_response_inspection': 'DisableServerResponseInspection', 'description': 'Description', 'target': 'Target', 'log_forwarding': 'LogForwarding', 'log-setting': 'LogForwarding', 'tag': 'Tags' } PAN_OS_ERROR_DICT = { '1': 'Unknown command - The specific config or operational command is not recognized.', '2': 'Internal errors - Check with technical support when seeing these errors.', '3': 'Internal errors - Check with technical support when seeing these errors.', '4': 'Internal errors - Check with technical support when seeing these errors.', '5': 'Internal errors - Check with technical support when seeing these errors.', '6': 'Bad Xpath -The xpath specified in one or more attributes of the command is invalid.' 'Check the API browser for proper xpath values.', '7': 'Object not present - Object specified by the xpath is not present. For example,' 'entry[@name=value] where no object with name value is present.', '8': 'Object not unique - For commands that operate on a single object, the specified object is not unique.', '10': 'Reference count not zero - Object cannot be deleted as there are other objects that refer to it.' 
' For example, address object still in use in policy.', '11': 'Internal error - Check with technical support when seeing these errors.', '12': 'Invalid object - Xpath or element values provided are not complete.', '14': 'Operation not possible - Operation is allowed but not possible in this case.' ' For example, moving a rule up one position when it is already at the top.', '15': 'Operation denied - Operation is allowed. For example, Admin not allowed to delete own account,' ' Running a command that is not allowed on a passive device.', '16': 'Unauthorized - The API role does not have access rights to run this query.', '17': 'Invalid command - Invalid command or parameters.', '18': 'Malformed command - The XML is malformed.', # 19,20: success '21': 'Internal error - Check with technical support when seeing these errors.', '22': 'Session timed out - The session for this query timed out.' } class PAN_OS_Not_Found(Exception): """ PAN-OS Error. """ def __init__(self, *args): pass def http_request(uri: str, method: str, headers: Dict = {}, body: Dict = {}, params: Dict = {}, files=None) -> Any: """ Makes an API call with the given arguments """ result = requests.request( method, uri, headers=headers, data=body, verify=USE_SSL, params=params, files=files ) if result.status_code < 200 or result.status_code >= 300: raise Exception('Request failed with status: ' + str(result.status_code) + '. Reason is: ' + str(result.reason)) # if pcap download if params.get('type') == 'export': return result json_result = json.loads(xml2json(result.text)) # handle non success if json_result['response']['@status'] != 'success': if 'msg' in json_result['response'] and 'line' in json_result['response']['msg']: # catch non existing object error and display a meaningful message if json_result['response']['msg']['line'] == 'No such node': raise Exception( 'Object was not found, verify that the name is correct and that the instance was committed.') # catch urlfiltering error and display a meaningful message elif str(json_result['response']['msg']['line']).find('test -> url') != -1: raise Exception('The URL filtering license is either expired or not active.' ' Please contact your PAN-OS representative.') # catch non valid jobID errors and display a meaningful message elif isinstance(json_result['response']['msg']['line'], str) and \ json_result['response']['msg']['line'].find('job') != -1 and \ (json_result['response']['msg']['line'].find('not found') != -1 or json_result['response']['msg']['line'].find('No such query job') != -1): raise Exception('Invalid Job ID error: ' + json_result['response']['msg']['line']) # catch already at the top/bottom error for rules and return this as an entry.note elif str(json_result['response']['msg']['line']).find('already at the') != -1: demisto.results('Rule ' + str(json_result['response']['msg']['line'])) sys.exit(0) # catch already registered ip tags and return this as an entry.note elif str(json_result['response']['msg']['line']).find('already exists, ignore') != -1: if isinstance(json_result['response']['msg']['line']['uid-response']['payload']['register']['entry'], list): ips = [o['@ip'] for o in json_result['response']['msg']['line']['uid-response']['payload']['register']['entry']] else: ips = json_result['response']['msg']['line']['uid-response']['payload']['register']['entry']['@ip'] demisto.results( 'IP ' + str(ips) + ' already exist in the tag.
All submitted IPs were not registered to the tag.') sys.exit(0) # catch timed out log queries and return this as an entry.note elif str(json_result['response']['msg']['line']).find('Query timed out') != -1: demisto.results(str(json_result['response']['msg']['line']) + '. Rerun the query.') sys.exit(0) if '@code' in json_result['response']: raise Exception( 'Request Failed.\nStatus code: ' + str(json_result['response']['@code']) + '\nWith message: ' + str( json_result['response']['msg']['line'])) else: raise Exception('Request Failed.\n' + str(json_result['response'])) # handle @code if 'response' in json_result and '@code' in json_result['response']: if json_result['response']['@code'] in PAN_OS_ERROR_DICT: error_message = 'Request Failed.\n' + PAN_OS_ERROR_DICT[json_result['response']['@code']] if json_result['response']['@code'] == '7' and DEVICE_GROUP: device_group_names = get_device_groups_names() if DEVICE_GROUP not in device_group_names: error_message += (f'\nDevice Group: {DEVICE_GROUP} does not exist.' f' The available Device Groups for this instance:' f' {", ".join(device_group_names)}.') raise PAN_OS_Not_Found(error_message) return_warning('List not found and might be empty', True) if json_result['response']['@code'] not in ['19', '20']: # error code non exist in dict and not of success if 'msg' in json_result['response']: raise Exception( 'Request Failed.\nStatus code: ' + str(json_result['response']['@code']) + '\nWith message: ' + str( json_result['response']['msg'])) else: raise Exception('Request Failed.\n' + str(json_result['response'])) return json_result def add_argument_list(arg: Any, field_name: str, member: Optional[bool], any_: Optional[bool] = False) -> str: member_stringify_list = '' if arg: if isinstance(arg, str): arg = [arg] for item in arg: member_stringify_list += '<member>' + item + '</member>' if field_name == 'member': return member_stringify_list elif member: return '<' + field_name + '>' + member_stringify_list + '</' + field_name + '>' else: return '<' + field_name + '>' + arg + '</' + field_name + '>' if any_: if member: return '<' + field_name + '><member>any</member></' + field_name + '>' else: return '<' + field_name + '>any</' + field_name + '>' else: return '' def add_argument(arg: Optional[str], field_name: str, member: bool) -> str: if arg: if member: return '<' + field_name + '><member>' + arg + '</member></' + field_name + '>' else: return '<' + field_name + '>' + arg + '</' + field_name + '>' else: return '' def add_argument_open(arg: Optional[str], field_name: str, member: bool) -> str: if arg: if member: return '<' + field_name + '><member>' + arg + '</member></' + field_name + '>' else: return '<' + field_name + '>' + arg + '</' + field_name + '>' else: if member: return '<' + field_name + '><member>any</member></' + field_name + '>' else: return '<' + field_name + '>any</' + field_name + '>' def add_argument_yes_no(arg: Optional[str], field_name: str, option: bool = False) -> str: if arg and arg == 'No': result = '<' + field_name + '>' + 'no' + '</' + field_name + '>' else: result = '<' + field_name + '>' + ('yes' if arg else 'no') + '</' + field_name + '>' if option: result = '<option>' + result + '</option>' return result def add_argument_target(arg: Optional[str], field_name: str) -> str: if arg: return '<' + field_name + '>' + '<devices>' + '<entry name=\"' + arg + '\"/>' + '</devices>' + '</' + field_name + '>' else: return '' def set_xpath_network(template: str = None) -> Tuple[str, Optional[str]]: """ Setting template xpath relevant 
to panorama instances. """ if template: if not DEVICE_GROUP or VSYS: raise Exception('Template is only relevant for Panorama instances.') if not template: template = demisto.params().get('template', None) # setting network xpath relevant to FW or panorama management if DEVICE_GROUP: xpath_network = f'/config/devices/entry[@name=\'localhost.localdomain\']/template/entry[@name=\'{template}\']' \ f'/config/devices/entry[@name=\'localhost.localdomain\']/network' else: xpath_network = "/config/devices/entry[@name='localhost.localdomain']/network" return xpath_network, template def prepare_security_rule_params(api_action: str = None, rulename: str = None, source: Any = None, destination: Any = None, negate_source: str = None, negate_destination: str = None, action: str = None, service: List[str] = None, disable: str = None, application: List[str] = None, source_user: str = None, category: List[str] = None, from_: str = None, to: str = None, description: str = None, target: str = None, log_forwarding: str = None, disable_server_response_inspection: str = None, tags: List[str] = None) -> Dict: if application is None or len(application) == 0: # application always must be specified and the default should be any application = ['any'] rulename = rulename if rulename else ('demisto-' + (str(uuid.uuid4()))[:8]) params = { 'type': 'config', 'action': api_action, 'key': API_KEY, 'element': add_argument_open(action, 'action', False) + add_argument_target(target, 'target') + add_argument_open(description, 'description', False) + add_argument_list(source, 'source', True, True) + add_argument_list(destination, 'destination', True, True) + add_argument_list(application, 'application', True) + add_argument_list(category, 'category', True) + add_argument_open(source_user, 'source-user', True) + add_argument_open(from_, 'from', True) # default from will always be any + add_argument_open(to, 'to', True) # default to will always be any + add_argument_list(service, 'service', True, True) + add_argument_yes_no(negate_source, 'negate-source') + add_argument_yes_no(negate_destination, 'negate-destination') + add_argument_yes_no(disable, 'disabled') + add_argument_yes_no(disable_server_response_inspection, 'disable-server-response-inspection', True) + add_argument(log_forwarding, 'log-setting', False) + add_argument_list(tags, 'tag', True) } if DEVICE_GROUP: if 'pre_post' not in demisto.args(): raise Exception('Please provide the pre_post argument when configuring' ' a security rule in Panorama instance.') else: params['xpath'] = XPATH_SECURITY_RULES + demisto.args()[ 'pre_post'] + '/security/rules/entry' + '[@name=\'' + rulename + '\']' else: params['xpath'] = XPATH_SECURITY_RULES + '[@name=\'' + rulename + '\']' return params def get_pan_os_version() -> str: """Retrieves pan-os version Returns: String representation of the version """ params = { 'type': 'version', 'key': API_KEY } result = http_request(URL, 'GET', params=params) version = result['response']['result']['sw-version'] return version def get_pan_os_major_version() -> int: """Retrieves pan-os major version Returns: String representation of the major version """ major_version = int(get_pan_os_version().split('.')[0]) return major_version ''' FUNCTIONS''' def panorama_test(): """ test module """ params = { 'type': 'op', 'cmd': '<show><system><info></info></system></show>', 'key': API_KEY } http_request( URL, 'GET', params=params ) if DEVICE_GROUP and DEVICE_GROUP != 'shared': device_group_test() _, template = set_xpath_network() if template: 
template_test(template) demisto.results('ok') def get_device_groups_names(): """ Get device group names in the Panorama """ params = { 'action': 'get', 'type': 'config', 'xpath': "/config/devices/entry/device-group/entry", 'key': API_KEY } result = http_request( URL, 'GET', params=params ) device_groups = result['response']['result']['entry'] device_group_names = [] if isinstance(device_groups, dict): # only one device group in the panorama device_group_names.append(device_groups.get('@name')) else: for device_group in device_groups: device_group_names.append(device_group.get('@name')) return device_group_names def device_group_test(): """ Test module for the Device group specified """ device_group_names = get_device_groups_names() if DEVICE_GROUP not in device_group_names: raise Exception(f'Device Group: {DEVICE_GROUP} does not exist.' f' The available Device Groups for this instance: {", ".join(device_group_names)}.') def get_templates_names(): """ Get templates names in the Panorama """ params = { 'action': 'get', 'type': 'config', 'xpath': "/config/devices/entry[@name=\'localhost.localdomain\']/template/entry", 'key': API_KEY } result = http_request( URL, 'GET', params=params ) templates = result['response']['result']['entry'] template_names = [] if isinstance(templates, dict): # only one device group in the panorama template_names.append(templates.get('@name')) else: for template in templates: template_names.append(template.get('@name')) return template_names def template_test(template): """ Test module for the Template specified """ template_names = get_templates_names() if template not in template_names: raise Exception(f'Template: {template} does not exist.' f' The available Templates for this instance: {", ".join(template_names)}.') @logger def panorama_command(): """ Executes a command """ params = {} for arg in demisto.args().keys(): params[arg] = demisto.args()[arg] params['key'] = API_KEY result = http_request( URL, 'POST', body=params ) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Command was executed successfully.', }) @logger def panorama_commit(): params = { 'type': 'commit', 'cmd': '<commit></commit>', 'key': API_KEY } result = http_request( URL, 'POST', body=params ) return result def panorama_commit_command(): """ Commit and show message in warroom """ result = panorama_commit() if 'result' in result['response']: # commit has been given a jobid commit_output = { 'JobID': result['response']['result']['job'], 'Status': 'Pending' } demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Commit:', commit_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': { "Panorama.Commit(val.JobID == obj.JobID)": commit_output } }) else: # no changes to commit demisto.results(result['response']['msg']) @logger def panorama_commit_status(): params = { 'type': 'op', 'cmd': '<show><jobs><id>' + demisto.args()['job_id'] + '</id></jobs></show>', 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def panorama_commit_status_command(): """ Check jobID of commit status """ result = panorama_commit_status() if result['response']['result']['job']['type'] != 'Commit': raise Exception('JobID given is not of a commit.') commit_status_output = {'JobID': result['response']['result']['job']['id']} if 
result['response']['result']['job']['status'] == 'FIN': if result['response']['result']['job']['result'] == 'OK': commit_status_output['Status'] = 'Completed' else: # result['response']['job']['result'] == 'FAIL' commit_status_output['Status'] = 'Failed' commit_status_output['Details'] = result['response']['result']['job']['details']['line'] if result['response']['result']['job']['status'] == 'ACT': if result['response']['result']['job']['result'] == 'PEND': commit_status_output['Status'] = 'Pending' # WARNINGS - Job warnings status_warnings = [] if result.get("response", {}).get('result', {}).get('job', {}).get('warnings', {}): status_warnings = result.get("response", {}).get('result', {}).get('job', {}).get('warnings', {}).get('line', []) ignored_error = 'configured with no certificate profile' commit_status_output["Warnings"] = [item for item in status_warnings if ignored_error not in item] demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Commit status:', commit_status_output, ['JobID', 'Status', 'Details', 'Warnings'], removeNull=True), 'EntryContext': {"Panorama.Commit(val.JobID == obj.JobID)": commit_status_output} }) @logger def panorama_push_to_device_group(): params = { 'type': 'commit', 'action': 'all', 'cmd': '<commit-all><shared-policy><device-group><entry name=\"' + DEVICE_GROUP + '\"/></device-group></shared-policy></commit-all>', 'key': API_KEY } result = http_request( URL, 'POST', body=params ) return result def panorama_push_to_device_group_command(): """ Push Panorama configuration and show message in warroom """ if not DEVICE_GROUP: raise Exception("The 'panorama-push-to-device-group' command is relevant for a Palo Alto Panorama instance.") result = panorama_push_to_device_group() if 'result' in result['response']: # push has been given a jobid push_output = { 'DeviceGroup': DEVICE_GROUP, 'JobID': result['response']['result']['job'], 'Status': 'Pending' } demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Push to Device Group:', push_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': { "Panorama.Push(val.JobID == obj.JobID)": push_output } }) else: # no changes to push demisto.results(result['response']['msg']['line']) @logger def panorama_push_status(): params = { 'type': 'op', 'cmd': '<show><jobs><id>' + demisto.args()['job_id'] + '</id></jobs></show>', 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def safeget(dct, keys): # Safe get from dictionary for key in keys: try: if isinstance(dct, dict): dct = dct[key] else: return None except KeyError: return None return dct def panorama_push_status_command(): """ Check jobID of push status """ result = panorama_push_status() job = result.get('response', {}).get('result', {}).get('job', {}) if job.get('type', '') != 'CommitAll': raise Exception('JobID given is not of a Push.') push_status_output = {'JobID': job.get('id')} if job.get('status', '') == 'FIN': if job.get('result', '') == 'OK': push_status_output['Status'] = 'Completed' else: push_status_output['Status'] = 'Failed' devices = job.get('devices') devices = devices.get('entry') if devices else devices if isinstance(devices, list): devices_details = [device.get('status') for device in devices if device] push_status_output['Details'] = devices_details elif
isinstance(devices, dict): push_status_output['Details'] = devices.get('status') if job.get('status') == 'PEND': push_status_output['Status'] = 'Pending' # WARNINGS - Job warnings status_warnings = [] # type: ignore devices = safeget(result, ["response", "result", "job", "devices", "entry"]) if devices: for device in devices: device_warnings = safeget(device, ["details", "msg", "warnings", "line"]) status_warnings.extend([] if not device_warnings else device_warnings) push_status_output["Warnings"] = status_warnings demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Push to Device Group status:', push_status_output, ['JobID', 'Status', 'Details', 'Warnings'], removeNull=True), 'EntryContext': {"Panorama.Push(val.JobID == obj.JobID)": push_status_output} }) ''' Addresses Commands ''' def prettify_addresses_arr(addresses_arr: list) -> List: if not isinstance(addresses_arr, list): return prettify_address(addresses_arr) pretty_addresses_arr = [] for address in addresses_arr: pretty_address = {'Name': address['@name']} if DEVICE_GROUP: pretty_address['DeviceGroup'] = DEVICE_GROUP if 'description' in address: pretty_address['Description'] = address['description'] if 'ip-netmask' in address: pretty_address['IP_Netmask'] = address['ip-netmask'] if 'ip-range' in address: pretty_address['IP_Range'] = address['ip-range'] if 'fqdn' in address: pretty_address['FQDN'] = address['fqdn'] if 'tag' in address and 'member' in address['tag']: pretty_address['Tags'] = address['tag']['member'] pretty_addresses_arr.append(pretty_address) return pretty_addresses_arr @logger def panorama_list_addresses(tag=None): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "address/entry", 'key': API_KEY } if tag: params['xpath'] += f'[( tag/member = \'{tag}\')]' result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_list_addresses_command(): """ Get all addresses """ tag = demisto.args().get('tag') addresses_arr = panorama_list_addresses(tag) addresses_output = prettify_addresses_arr(addresses_arr) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': addresses_arr, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Addresses:', addresses_output, ['Name', 'IP_Netmask', 'IP_Range', 'FQDN', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.Addresses(val.Name == obj.Name)": addresses_output } }) def prettify_address(address: Dict) -> Dict: pretty_address = {'Name': address['@name']} if DEVICE_GROUP: pretty_address['DeviceGroup'] = DEVICE_GROUP if 'description' in address: pretty_address['Description'] = address['description'] if 'ip-netmask' in address: pretty_address['IP_Netmask'] = address['ip-netmask'] if 'ip-range' in address: pretty_address['IP_Range'] = address['ip-range'] if 'fqdn' in address: pretty_address['FQDN'] = address['fqdn'] if 'tag' in address and 'member' in address['tag']: pretty_address['Tags'] = address['tag']['member'] return pretty_address @logger def panorama_get_address(address_name: str) -> Dict: params = { 'action': 'show', 'type': 'config', 'xpath': XPATH_OBJECTS + "address/entry[@name='" + address_name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_address_command(): """ Get an address """ address_name = demisto.args()['name'] 
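    # Hedged note: `panorama_get_address` below issues a config 'show' action,
    # which reads the active (committed) configuration. Swapping it for 'get'
    # against the same xpath would read the candidate (uncommitted) config
    # instead, mirroring the show_uncommitted switch used by
    # panorama_list_static_routes. Illustrative request shape only:
    #   {'action': 'get', 'type': 'config',
    #    'xpath': XPATH_OBJECTS + "address/entry[@name='" + address_name + "']",
    #    'key': API_KEY}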
address = panorama_get_address(address_name) address_output = prettify_address(address) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': address, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Address:', address_output, ['Name', 'IP_Netmask', 'IP_Range', 'FQDN', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.Addresses(val.Name == obj.Name)": address_output } }) @logger def panorama_create_address(address_name: str, fqdn: str = None, ip_netmask: str = None, ip_range: str = None, description: str = None, tags: list = None): params = {'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "address/entry[@name='" + address_name + "']", 'key': API_KEY, 'element': (add_argument(fqdn, 'fqdn', False) + add_argument(ip_netmask, 'ip-netmask', False) + add_argument(ip_range, 'ip-range', False) + add_argument(description, 'description', False) + add_argument_list(tags, 'tag', True)) } http_request( URL, 'POST', body=params, ) def panorama_create_address_command(): """ Create an address object """ address_name = demisto.args()['name'] description = demisto.args().get('description') tags = argToList(demisto.args()['tag']) if 'tag' in demisto.args() else None fqdn = demisto.args().get('fqdn') ip_netmask = demisto.args().get('ip_netmask') ip_range = demisto.args().get('ip_range') if not fqdn and not ip_netmask and not ip_range: raise Exception('Please specify exactly one of the following: fqdn, ip_netmask, ip_range.') if (fqdn and ip_netmask) or (fqdn and ip_range) or (ip_netmask and ip_range): raise Exception('Please specify exactly one of the following: fqdn, ip_netmask, ip_range.') address = panorama_create_address(address_name, fqdn, ip_netmask, ip_range, description, tags) address_output = {'Name': address_name} if DEVICE_GROUP: address_output['DeviceGroup'] = DEVICE_GROUP if fqdn: address_output['FQDN'] = fqdn if ip_netmask: address_output['IP_Netmask'] = ip_netmask if ip_range: address_output['IP_Range'] = ip_range if description: address_output['Description'] = description if tags: address_output['Tags'] = tags demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': address, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Address was created successfully.', 'EntryContext': { "Panorama.Addresses(val.Name == obj.Name)": address_output } }) @logger def panorama_delete_address(address_name: str): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "address/entry[@name='" + address_name + "']", 'element': "<entry name='" + address_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_address_command(): """ Delete an address """ address_name = demisto.args()['name'] address = panorama_delete_address(address_name) address_output = {'Name': address_name} if DEVICE_GROUP: address_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': address, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Address was deleted successfully.', 'EntryContext': { "Panorama.Addresses(val.Name == obj.Name)": address_output } }) ''' Address Group Commands ''' def prettify_address_groups_arr(address_groups_arr: list) -> List: if not isinstance(address_groups_arr, list): return prettify_address_group(address_groups_arr) pretty_address_groups_arr = [] for address_group in address_groups_arr: pretty_address_group = { 
'Name': address_group['@name'], 'Type': 'static' if 'static' in address_group else 'dynamic' } if DEVICE_GROUP: pretty_address_group['DeviceGroup'] = DEVICE_GROUP if 'description' in address_group: pretty_address_group['Description'] = address_group['description'] if 'tag' in address_group and 'member' in address_group['tag']: pretty_address_group['Tags'] = address_group['tag']['member'] if pretty_address_group['Type'] == 'static': # static address groups can have empty lists if address_group['static']: pretty_address_group['Addresses'] = address_group['static']['member'] else: pretty_address_group['Match'] = address_group['dynamic']['filter'] pretty_address_groups_arr.append(pretty_address_group) return pretty_address_groups_arr @logger def panorama_list_address_groups(tag: str = None): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "address-group/entry", 'key': API_KEY } if tag: params['xpath'] += f'[( tag/member = \'{tag}\')]' result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_list_address_groups_command(): """ Get all address groups """ tag = demisto.args().get('tag') address_groups_arr = panorama_list_address_groups(tag) address_groups_output = prettify_address_groups_arr(address_groups_arr) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': address_groups_arr, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Address groups:', address_groups_output, ['Name', 'Type', 'Addresses', 'Match', 'Description', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.AddressGroups(val.Name == obj.Name)": address_groups_output } }) def prettify_address_group(address_group: Dict) -> Dict: pretty_address_group = { 'Name': address_group['@name'], 'Type': 'static' if 'static' in address_group else 'dynamic' } if DEVICE_GROUP: pretty_address_group['DeviceGroup'] = DEVICE_GROUP if 'description' in address_group: pretty_address_group['Description'] = address_group['description'] if 'tag' in address_group and 'member' in address_group['tag']: pretty_address_group['Tags'] = address_group['tag']['member'] if pretty_address_group['Type'] == 'static': pretty_address_group['Addresses'] = address_group['static']['member'] else: pretty_address_group['Match'] = address_group['dynamic']['filter'] return pretty_address_group @logger def panorama_get_address_group(address_group_name: str): params = { 'action': 'show', 'type': 'config', 'xpath': XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_address_group_command(): """ Get an address group """ address_group_name = demisto.args()['name'] result = panorama_get_address_group(address_group_name) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Address group:', prettify_address_group(result), ['Name', 'Type', 'Addresses', 'Match', 'Description', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.AddressGroups(val.Name == obj.Name)": prettify_address_group(result) } }) @logger def panorama_create_static_address_group(address_group_name: str, addresses: list, description: str = None, tags: list = None): params = {'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + 
"']", 'key': API_KEY, 'element': ( "<static>" + add_argument_list(addresses, 'member', True) + "</static>" + add_argument(description, 'description', False) + add_argument_list(tags, 'tag', True) )} result = http_request( URL, 'POST', body=params, ) return result def panorama_create_dynamic_address_group(address_group_name: str, match: str, description: str = None, tags: list = None): params = { 'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']", 'element': "<dynamic>" + add_argument(match, 'filter', False) + "</dynamic>" + add_argument(description, 'description', False) + add_argument_list(tags, 'tag', True), 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_create_address_group_command(): """ Create an address group """ address_group_name = demisto.args()['name'] type_ = demisto.args()['type'] description = demisto.args().get('description') tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None match = demisto.args().get('match') addresses = argToList(demisto.args()['addresses']) if 'addresses' in demisto.args() else None if match and addresses: raise Exception('Please specify only one of the following: addresses, match.') if type_ == 'static': if not addresses: raise Exception('Please specify addresses in order to create a static address group.') if type_ == 'dynamic': if not match: raise Exception('Please specify a match in order to create a dynamic address group.') if type_ == 'static': result = panorama_create_static_address_group(address_group_name, addresses, description, tags) else: result = panorama_create_dynamic_address_group(address_group_name, match, description, tags) address_group_output = { 'Name': address_group_name, 'Type': type_ } if DEVICE_GROUP: address_group_output['DeviceGroup'] = DEVICE_GROUP if match: address_group_output['Match'] = match if addresses: address_group_output['Addresses'] = addresses if description: address_group_output['Description'] = description if tags: address_group_output['Tags'] = tags demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Address group was created successfully.', 'EntryContext': { "Panorama.AddressGroups(val.Name == obj.Name)": address_group_output } }) @logger def panorama_delete_address_group(address_group_name: str): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']", 'element': "<entry name='" + address_group_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_address_group_command(): """ Delete an address group """ address_group_name = demisto.args()['name'] address_group = panorama_delete_address_group(address_group_name) address_group_output = {'Name': address_group_name} if DEVICE_GROUP: address_group_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': address_group, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Address group was deleted successfully.', 'EntryContext': { "Panorama.AddressGroups(val.Name == obj.Name)": address_group_output } }) def panorama_edit_address_group_command(): """ Edit an address group """ address_group_name = demisto.args()['name'] type_ = demisto.args()['type'] match = demisto.args().get('match') element_to_add 
= argToList(demisto.args()['element_to_add']) if 'element_to_add' in demisto.args() else None element_to_remove = argToList( demisto.args()['element_to_remove']) if 'element_to_remove' in demisto.args() else None if type_ == 'dynamic': if not match: raise Exception('To edit a Dynamic Address group, please provide a match.') match_param = add_argument_open(match, 'filter', False) match_path = XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']/dynamic/filter" if type_ == 'static': if (element_to_add and element_to_remove) or (not element_to_add and not element_to_remove): raise Exception('To edit a Static Address group, ' 'please specify exactly one of the following: element_to_add, element_to_remove.') address_group_prev = panorama_get_address_group(address_group_name) address_group_list: List[str] = [] if 'static' in address_group_prev: if address_group_prev['static']: address_group_list = argToList(address_group_prev['static']['member']) if element_to_add: addresses = list(set(element_to_add + address_group_list)) else: addresses = [item for item in address_group_list if item not in element_to_remove] addresses_param = add_argument_list(addresses, 'member', False) addresses_path = XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']/static" description = demisto.args().get('description') tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None params = { 'action': 'edit', 'type': 'config', 'key': API_KEY, 'xpath': '', 'element': '' } address_group_output = {'Name': address_group_name} if DEVICE_GROUP: address_group_output['DeviceGroup'] = DEVICE_GROUP if type_ == 'dynamic' and match: params['xpath'] = match_path params['element'] = match_param result = http_request( URL, 'POST', body=params ) address_group_output['Match'] = match if type_ == 'static' and addresses: params['xpath'] = addresses_path params['element'] = "<static>" + addresses_param + "</static>" result = http_request( URL, 'POST', body=params ) address_group_output['Addresses'] = addresses if description: description_param = add_argument_open(description, 'description', False) description_path = XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']/description" params['xpath'] = description_path params['element'] = description_param result = http_request( URL, 'POST', body=params ) address_group_output['Description'] = description if tags: tag_param = add_argument_list(tags, 'tag', True) tag_path = XPATH_OBJECTS + "address-group/entry[@name='" + address_group_name + "']/tag" params['xpath'] = tag_path params['element'] = tag_param result = http_request( URL, 'POST', body=params ) address_group_output['Tags'] = tags demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Address Group was edited successfully.', 'EntryContext': { "Panorama.AddressGroups(val.Name == obj.Name)": address_group_output } }) ''' Services Commands ''' def prettify_services_arr(services_arr: list): if not isinstance(services_arr, list): return prettify_service(services_arr) pretty_services_arr = [] for service in services_arr: pretty_service = {'Name': service['@name']} if DEVICE_GROUP: pretty_service['DeviceGroup'] = DEVICE_GROUP if 'description' in service: pretty_service['Description'] = service['description'] if 'tag' in service and 'member' in service['tag']: pretty_service['Tags'] = service['tag']['member'] protocol = '' if 'protocol' in service: if 'tcp' in
service['protocol']: protocol = 'tcp' elif 'udp' in service['protocol']: protocol = 'udp' else: protocol = 'sctp' pretty_service['Protocol'] = protocol if 'port' in service['protocol'][protocol]: pretty_service['DestinationPort'] = service['protocol'][protocol]['port'] if 'source-port' in service['protocol'][protocol]: pretty_service['SourcePort'] = service['protocol'][protocol]['source-port'] pretty_services_arr.append(pretty_service) return pretty_services_arr @logger def panorama_list_services(tag: str = None): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "service/entry", 'key': API_KEY } if tag: params['xpath'] += f'[( tag/member = \'{tag}\')]' result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_list_services_command(): """ Get all Services """ tag = demisto.args().get('tag') services_arr = panorama_list_services(tag) services_output = prettify_services_arr(services_arr) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': services_arr, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Services:', services_output, ['Name', 'Protocol', 'SourcePort', 'DestinationPort', 'Description', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.Services(val.Name == obj.Name)": services_output } }) def prettify_service(service: Dict): pretty_service = { 'Name': service['@name'], } if DEVICE_GROUP: pretty_service['DeviceGroup'] = DEVICE_GROUP if 'description' in service: pretty_service['Description'] = service['description'] if 'tag' in service and 'member' in service['tag']: pretty_service['Tags'] = service['tag']['member'] protocol = '' if 'protocol' in service: if 'tcp' in service['protocol']: protocol = 'tcp' elif 'udp' in service['protocol']: protocol = 'udp' else: protocol = 'sctp' pretty_service['Protocol'] = protocol if 'port' in service['protocol'][protocol]: pretty_service['DestinationPort'] = service['protocol'][protocol]['port'] if 'source-port' in service['protocol'][protocol]: pretty_service['SourcePort'] = service['protocol'][protocol]['source-port'] return pretty_service @logger def panorama_get_service(service_name: str): params = { 'action': 'show', 'type': 'config', 'xpath': XPATH_OBJECTS + "service/entry[@name='" + service_name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_service_command(): """ Get a service """ service_name = demisto.args()['name'] service = panorama_get_service(service_name) service_output = prettify_service(service) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': service, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Service:', service_output, ['Name', 'Protocol', 'SourcePort', 'DestinationPort', 'Description', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.Services(val.Name == obj.Name)": service_output } }) @logger def panorama_create_service(service_name: str, protocol: str, destination_port: str, source_port: str = None, description: str = None, tags: list = None): params = { 'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "service/entry[@name='" + service_name + "']", 'key': API_KEY, 'element': '<protocol>' + '<' + protocol + '>' + add_argument(destination_port, 'port', False) + add_argument(source_port, 'source-port', False) + '</' + protocol + '>' + '</protocol>' + add_argument(description, 'description',
False) + add_argument_list(tags, 'tag', True) } result = http_request( URL, 'POST', body=params, ) return result def panorama_create_service_command(): """ Create a service object """ service_name = demisto.args()['name'] protocol = demisto.args()['protocol'] destination_port = demisto.args()['destination_port'] source_port = demisto.args().get('source_port') description = demisto.args().get('description') tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None service = panorama_create_service(service_name, protocol, destination_port, source_port, description, tags) service_output = { 'Name': service_name, 'Protocol': protocol, 'DestinationPort': destination_port } if DEVICE_GROUP: service_output['DeviceGroup'] = DEVICE_GROUP if source_port: service_output['SourcePort'] = source_port if description: service_output['Description'] = description if tags: service_output['Tags'] = tags demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': service, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Service was created successfully.', 'EntryContext': { "Panorama.Services(val.Name == obj.Name)": service_output } }) @logger def panorama_delete_service(service_name: str): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "service/entry[@name='" + service_name + "']", 'element': "<entry name='" + service_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_service_command(): """ Delete a service """ service_name = demisto.args()['name'] service = panorama_delete_service(service_name) service_output = {'Name': service_name} if DEVICE_GROUP: service_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': service, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Service was deleted successfully.', 'EntryContext': { "Panorama.Services(val.Name == obj.Name)": service_output } }) ''' Service Group Commands ''' def prettify_service_groups_arr(service_groups_arr: list): if not isinstance(service_groups_arr, list): return prettify_service_group(service_groups_arr) pretty_service_groups_arr = [] for service_group in service_groups_arr: pretty_service_group = { 'Name': service_group['@name'], 'Services': service_group['members']['member'] } if DEVICE_GROUP: pretty_service_group['DeviceGroup'] = DEVICE_GROUP if 'tag' in service_group and 'member' in service_group['tag']: pretty_service_group['Tags'] = service_group['tag']['member'] pretty_service_groups_arr.append(pretty_service_group) return pretty_service_groups_arr @logger def panorama_list_service_groups(tag: str = None): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "service-group/entry", 'key': API_KEY } if tag: params['xpath'] += f'[( tag/member = \'{tag}\')]' result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_list_service_groups_command(): """ Get all service groups """ tag = demisto.args().get('tag') service_groups_arr = panorama_list_service_groups(tag) service_groups_output = prettify_service_groups_arr(service_groups_arr) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': service_groups_arr, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Service groups:', service_groups_output, ['Name', 'Services', 'Tags'], removeNull=True), 'EntryContext': {
"Panorama.ServiceGroups(val.Name == obj.Name)": service_groups_output } }) def prettify_service_group(service_group: dict): pretty_service_group = { 'Name': service_group['@name'], 'Services': service_group['members']['member'] } if DEVICE_GROUP: pretty_service_group['DeviceGroup'] = DEVICE_GROUP if 'tag' in service_group and 'member' in service_group['tag']: pretty_service_group['Tags'] = service_group['tag']['member'] return pretty_service_group @logger def panorama_get_service_group(service_group_name): params = { 'action': 'show', 'type': 'config', 'xpath': XPATH_OBJECTS + "service-group/entry[@name='" + service_group_name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_service_group_command(): """ Get a service group """ service_group_name = demisto.args()['name'] result = panorama_get_service_group(service_group_name) pretty_service_group = prettify_service_group(result) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Service group:', pretty_service_group, ['Name', 'Services', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.ServiceGroups(val.Name == obj.Name)": pretty_service_group } }) def panorama_create_service_group(service_group_name, services, tags): params = { 'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "service-group/entry[@name='" + service_group_name + "']", 'element': '<members>' + add_argument_list(services, 'member', True) + '</members>' + add_argument_list(tags, 'tag', True), 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_create_service_group_command(): """ Create a service group """ service_group_name = demisto.args()['name'] services = argToList(demisto.args()['services']) tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None result = panorama_create_service_group(service_group_name, services, tags) service_group_output = { 'Name': service_group_name, 'Services': services } if DEVICE_GROUP: service_group_output['DeviceGroup'] = DEVICE_GROUP if tags: service_group_output['Tags'] = tags demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Service group was created successfully.', 'EntryContext': { "Panorama.ServiceGroups(val.Name == obj.Name)": service_group_output } }) @logger def panorama_delete_service_group(service_group_name): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "service-group/entry[@name='" + service_group_name + "']", 'element': "<entry name='" + service_group_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_service_group_command(): """ Delete a service group """ service_group_name = demisto.args()['name'] service_group = panorama_delete_service_group(service_group_name) service_group_output = {'Name': service_group_name} if DEVICE_GROUP: service_group_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': service_group, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Service group was deleted successfully.', 'EntryContext': { "Panorama.ServiceGroups(val.Name == obj.Name)": service_group_output } }) @logger def
panorama_edit_service_group(service_group_name, services, tag): params = { 'action': 'edit', 'type': 'config', 'xpath': '', 'element': '', 'key': API_KEY, } if services: services_xpath = XPATH_OBJECTS + "service-group/entry[@name='" + service_group_name + "']/members" services_element = '<members>' + add_argument_list(services, 'member', False) + '</members>' params['xpath'] = services_xpath params['element'] = services_element result = http_request( URL, 'POST', body=params ) if tag: tag_xpath = XPATH_OBJECTS + "service-group/entry[@name='" + service_group_name + "']/tag" tag_element = add_argument_list(tag, 'tag', True) params['xpath'] = tag_xpath params['element'] = tag_element result = http_request( URL, 'POST', body=params ) return result def panorama_edit_service_group_command(): """ Edit a service group """ service_group_name = demisto.args()['name'] services_to_add = argToList(demisto.args()['services_to_add']) if 'services_to_add' in demisto.args() else None services_to_remove = argToList( demisto.args()['services_to_remove']) if 'services_to_remove' in demisto.args() else None tag = argToList(demisto.args()['tag']) if 'tag' in demisto.args() else None if not services_to_add and not services_to_remove and not tag: raise Exception('Specify at least one of the following arguments: services_to_add, services_to_remove, tag') if services_to_add and services_to_remove: raise Exception('Specify at most one of the following arguments: services_to_add, services_to_remove') services: List[str] = [] if services_to_add or services_to_remove: service_group_prev = panorama_get_service_group(service_group_name) service_group_list = argToList(service_group_prev['members']['member']) if services_to_add: services = list(set(services_to_add + service_group_list)) else: services = [item for item in service_group_list if item not in services_to_remove] if len(services) == 0: raise Exception('A Service group must have at least one service.') result = panorama_edit_service_group(service_group_name, services, tag) service_group_output = {'Name': service_group_name} if DEVICE_GROUP: service_group_output['DeviceGroup'] = DEVICE_GROUP if len(services) > 0: service_group_output['Services'] = services if tag: service_group_output['Tag'] = tag demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Service group was edited successfully.', 'EntryContext': { "Panorama.ServiceGroups(val.Name == obj.Name)": service_group_output } }) ''' Custom URL Category Commands ''' def prettify_custom_url_category(custom_url_category): pretty_custom_url_category = { 'Name': custom_url_category['@name'], } if DEVICE_GROUP: pretty_custom_url_category['DeviceGroup'] = DEVICE_GROUP if 'description' in custom_url_category: pretty_custom_url_category['Description'] = custom_url_category['description'] # In PAN-OS 9.X changes to the default behavior were introduced regarding custom url categories. 
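# Illustrative sketch (an assumption inferred from the branching below, not captured device output):
# before PAN-OS 9.x an entry only carries its member list, e.g.
#     {'@name': 'my-cat', 'list': {'member': ['example.com']}}
# while a 9.x entry also reports a 'type', either 'URL List' (members are site URLs,
# surfaced as 'Sites') or 'Category Match' (members are predefined categories,
# surfaced as 'Categories'), e.g.
#     {'@name': 'my-cat', 'type': 'Category Match', 'list': {'member': ['gambling']}}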
if 'type' in custom_url_category: pretty_custom_url_category['Type'] = custom_url_category['type'] if pretty_custom_url_category['Type'] == 'Category Match': pretty_custom_url_category['Categories'] = custom_url_category['list']['member'] else: pretty_custom_url_category['Sites'] = custom_url_category['list']['member'] else: pretty_custom_url_category['Sites'] = custom_url_category['list']['member'] return pretty_custom_url_category @logger def panorama_get_custom_url_category(name): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/custom-url-category/entry[@name='" + name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_custom_url_category_command(): """ Get a custom url category """ name = demisto.args()['name'] custom_url_category = panorama_get_custom_url_category(name) custom_url_category_output = prettify_custom_url_category(custom_url_category) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': custom_url_category, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Custom URL Category:', custom_url_category_output, ['Name', 'Type', 'Categories', 'Sites', 'Description'], removeNull=True), 'EntryContext': { "Panorama.CustomURLCategory(val.Name == obj.Name)": custom_url_category_output } }) @logger def panorama_create_custom_url_category(custom_url_category_name: str, type_: Any = None, sites=None, categories=None, description: str = None): # In PAN-OS 9.X changes to the default behavior were introduced regarding custom url categories. major_version = get_pan_os_major_version() element = add_argument(description, 'description', False) if major_version <= 8: if type_ or categories: raise Exception('The type and categories arguments are only relevant for PAN-OS 9.x versions.') element += add_argument_list(sites, 'list', True) else: # major is 9.x if not type_: raise Exception('The type argument is mandatory for PAN-OS 9.x versions.') if (not sites and not categories) or (sites and categories): raise Exception('Exactly one of the sites and categories arguments should be defined.') if (type_ == 'URL List' and categories) or (type_ == 'Category Match' and sites): raise Exception('URL List type is only for sites, Category Match is only for categories.') if type_ == 'URL List': element += add_argument_list(sites, 'list', True) else: element += add_argument_list(categories, 'list', True) element += add_argument(type_, 'type', False) params = { 'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/custom-url-category/entry[@name='" + custom_url_category_name + "']", 'element': element, 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) custom_url_category_output = {'Name': custom_url_category_name} if DEVICE_GROUP: custom_url_category_output['DeviceGroup'] = DEVICE_GROUP if description: custom_url_category_output['Description'] = description if type_: custom_url_category_output['Type'] = type_ if sites: custom_url_category_output['Sites'] = sites else: custom_url_category_output['Categories'] = categories return result, custom_url_category_output def panorama_create_custom_url_category_command(): """ Create a custom URL category """ custom_url_category_name = demisto.args()['name'] type_ = demisto.args()['type'] if 'type' in demisto.args() else None sites = argToList(demisto.args()['sites']) if 'sites' in demisto.args() else None categories = 
argToList(demisto.args()['categories']) if 'categories' in demisto.args() else None description = demisto.args().get('description') custom_url_category, custom_url_category_output = panorama_create_custom_url_category(custom_url_category_name, type_, sites, categories, description) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': custom_url_category, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Created Custom URL Category:', custom_url_category_output, ['Name', 'Type', 'Categories', 'Sites', 'Description'], removeNull=True), 'EntryContext': { "Panorama.CustomURLCategory(val.Name == obj.Name)": custom_url_category_output } }) @logger def panorama_delete_custom_url_category(custom_url_category_name): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/custom-url-category/entry[@name='" + custom_url_category_name + "']", 'element': "<entry name='" + custom_url_category_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_custom_url_category_command(): """ Delete a custom url category """ custom_url_category_name = demisto.args()['name'] result = panorama_delete_custom_url_category(custom_url_category_name) custom_url_category_output = {'Name': custom_url_category_name} if DEVICE_GROUP: custom_url_category_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Custom URL category was deleted successfully.', 'EntryContext': { "Panorama.CustomURLCategory(val.Name == obj.Name)": custom_url_category_output } }) @logger def panorama_edit_custom_url_category(custom_url_category_name, type_, items, description=None): major_version = get_pan_os_major_version() description_element = add_argument(description, 'description', False) items_element = add_argument_list(items, 'list', True) if major_version <= 8: if type_ == 'Category Match': raise Exception('The Categories argument is only relevant for PAN-OS 9.x versions.') element = f"<entry name='{custom_url_category_name}'>{description_element}{items_element}</entry>" else: type_element = add_argument(type_, 'type', False) element = f"<entry name='{custom_url_category_name}'>{description_element}{items_element}{type_element}</entry>" params = { 'action': 'edit', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/custom-url-category/entry[@name='" + custom_url_category_name + "']", 'element': element, 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) custom_url_category_output = {'Name': custom_url_category_name, 'Type': type_} if DEVICE_GROUP: custom_url_category_output['DeviceGroup'] = DEVICE_GROUP if description: custom_url_category_output['Description'] = description if type_ == 'Category Match': custom_url_category_output['Categories'] = items else: custom_url_category_output['Sites'] = items return result, custom_url_category_output def panorama_custom_url_category_add_items(custom_url_category_name, items, type_): """ Add sites or categories to a configured custom url category """ custom_url_category = panorama_get_custom_url_category(custom_url_category_name) if '@dirtyId' in custom_url_category: raise Exception('Please commit the instance prior to editing the Custom URL Category.') description = custom_url_category.get('description') custom_url_category_items: List[str] = [] if 'list' in 
custom_url_category: if custom_url_category['list']: custom_url_category_items = argToList(custom_url_category['list']['member']) merged_items = list((set(items)).union(set(custom_url_category_items))) result, custom_url_category_output = panorama_edit_custom_url_category(custom_url_category_name, type_, merged_items, description) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Updated Custom URL Category:', custom_url_category_output, ['Name', 'Type', 'Categories', 'Sites', 'Description'], removeNull=True), 'EntryContext': { "Panorama.CustomURLCategory(val.Name == obj.Name)": custom_url_category_output } }) def panorama_custom_url_category_remove_items(custom_url_category_name, items, type_): """ Remove sites or categories from a configured custom url category """ custom_url_category = panorama_get_custom_url_category(custom_url_category_name) if '@dirtyId' in custom_url_category: raise Exception('Please commit the instance prior to editing the Custom URL Category.') description = custom_url_category.get('description') custom_url_category_items: List[str] = [] if 'list' in custom_url_category: if 'member' in custom_url_category['list']: custom_url_category_items = custom_url_category['list']['member'] if not custom_url_category_items: raise Exception('Custom url category does not contain sites or categories.') subtracted_items = [item for item in custom_url_category_items if item not in items] result, custom_url_category_output = panorama_edit_custom_url_category(custom_url_category_name, type_, subtracted_items, description) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Updated Custom URL Category:', custom_url_category_output, ['Name', 'Categories', 'Sites', 'Description'], removeNull=True), 'EntryContext': { "Panorama.CustomURLCategory(val.Name == obj.Name)": custom_url_category_output } }) def panorama_edit_custom_url_category_command(): custom_url_category_name = demisto.args()['name'] items = argToList(demisto.args()['sites']) if 'sites' in demisto.args() else argToList(demisto.args()['categories']) type_ = "URL List" if 'sites' in demisto.args() else "Category Match" if demisto.args()['action'] == 'remove': panorama_custom_url_category_remove_items(custom_url_category_name, items, type_) else: panorama_custom_url_category_add_items(custom_url_category_name, items, type_) ''' URL Filtering ''' @logger def panorama_get_url_category(url_cmd, url): params = { 'action': 'show', 'type': 'op', 'key': API_KEY, 'cmd': f'<test><{url_cmd}>{url}</{url_cmd}></test>' } raw_result = http_request( URL, 'POST', body=params, ) result = raw_result['response']['result'] if url_cmd == 'url-info-host': category = result.split(': ')[1] else: result = result.splitlines()[1] if url_cmd == 'url': category = result.split(' ')[1] else: # url-info-cloud category = result.split(',')[3] return category def populate_url_filter_category_from_context(category): url_filter_category = demisto.dt(demisto.context(), f'Panorama.URLFilter(val.Category === "{category}")') if not url_filter_category: return [] if type(url_filter_category) is list: return url_filter_category[0].get("URL") else: # url_filter_category is a dict context_urls = url_filter_category.get("URL", None) # pylint: disable=no-member if type(context_urls) is str: return [context_urls] else: return context_urls def
panorama_get_url_category_command(url_cmd: str): """ Get the url category from Palo Alto URL Filtering """ urls = argToList(demisto.args()['url']) categories_dict: Dict[str, list] = {} categories_dict_hr: Dict[str, list] = {} for url in urls: category = panorama_get_url_category(url_cmd, url) if category in categories_dict: categories_dict[category].append(url) categories_dict_hr[category].append(url) else: categories_dict[category] = [url] categories_dict_hr[category] = [url] context_urls = populate_url_filter_category_from_context(category) categories_dict[category] = list((set(categories_dict[category])).union(set(context_urls))) url_category_output_hr = [] for key, value in categories_dict_hr.items(): url_category_output_hr.append({ 'Category': key, 'URL': value }) url_category_output = [] for key, value in categories_dict.items(): url_category_output.append({ 'Category': key, 'URL': value }) title = 'URL Filtering' if url_cmd == 'url-info-cloud': title += ' from cloud' elif url_cmd == 'url-info-host': title += ' from host' human_readable = tableToMarkdown(f'{title}:', url_category_output_hr, ['URL', 'Category'], removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': categories_dict, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': { "Panorama.URLFilter(val.Category === obj.Category)": url_category_output } }) ''' URL Filter ''' def prettify_get_url_filter(url_filter): pretty_url_filter = {'Name': url_filter['@name']} if DEVICE_GROUP: pretty_url_filter['DeviceGroup'] = DEVICE_GROUP if 'description' in url_filter: pretty_url_filter['Description'] = url_filter['description'] pretty_url_filter['Category'] = [] url_category_list: List[str] = [] action: str if 'alert' in url_filter: url_category_list = url_filter['alert']['member'] action = 'alert' elif 'allow' in url_filter: url_category_list = url_filter['allow']['member'] action = 'allow' elif 'block' in url_filter: url_category_list = url_filter['block']['member'] action = 'block' elif 'continue' in url_filter: url_category_list = url_filter['continue']['member'] action = 'continue' elif 'override' in url_filter: url_category_list = url_filter['override']['member'] action = 'override' for category in url_category_list: pretty_url_filter['Category'].append({ 'Name': category, 'Action': action }) if 'allow-list' in url_filter or 'block-list' in url_filter: pretty_url_filter['Overrides'] = [] if 'allow-list' in url_filter: pretty_url_filter['OverrideAllowList'] = url_filter['allow-list']['member'] else: pretty_url_filter['OverrideBlockList'] = url_filter['block-list']['member'] return pretty_url_filter @logger def panorama_get_url_filter(name): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_url_filter_command(): """ Get a URL Filter """ name = demisto.args()['name'] url_filter = panorama_get_url_filter(name) url_filter_output = prettify_get_url_filter(url_filter) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': url_filter, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('URL Filter:', url_filter_output, ['Name', 'Category', 'OverrideAllowList', 'OverrideBlockList', 'Description'], removeNull=True), 'EntryContext': { "Panorama.URLFilter(val.Name == obj.Name)": 
url_filter_output } }) @logger def panorama_create_url_filter( url_filter_name, action, url_category_list, override_allow_list=None, override_block_list=None, description=None): element = add_argument_list(url_category_list, action, True) + add_argument_list(override_allow_list, 'allow-list', True) + add_argument_list( override_block_list, 'block-list', True) + add_argument(description, 'description', False) + "<action>block</action>" params = { 'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + url_filter_name + "']", 'element': element, 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_create_url_filter_command(): """ Create a URL Filter """ url_filter_name = demisto.args()['name'] action = demisto.args()['action'] url_category_list = argToList(demisto.args()['url_category']) override_allow_list = argToList(demisto.args().get('override_allow_list')) override_block_list = argToList(demisto.args().get('override_block_list')) description = demisto.args().get('description') result = panorama_create_url_filter(url_filter_name, action, url_category_list, override_allow_list, override_block_list, description) url_filter_output = {'Name': url_filter_name} if DEVICE_GROUP: url_filter_output['DeviceGroup'] = DEVICE_GROUP url_filter_output['Category'] = [] for category in url_category_list: url_filter_output['Category'].append({ 'Name': category, 'Action': action }) if override_allow_list: url_filter_output['OverrideAllowList'] = override_allow_list if override_block_list: url_filter_output['OverrideBlockList'] = override_block_list if description: url_filter_output['Description'] = description demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'URL Filter was created successfully.', 'EntryContext': { "Panorama.URLFilter(val.Name == obj.Name)": url_filter_output } }) @logger def panorama_edit_url_filter(url_filter_name, element_to_change, element_value, add_remove_element=None): url_filter_prev = panorama_get_url_filter(url_filter_name) if '@dirtyId' in url_filter_prev: raise Exception('Please commit the instance prior to editing the URL Filter.') url_filter_output = {'Name': url_filter_name} if DEVICE_GROUP: url_filter_output['DeviceGroup'] = DEVICE_GROUP params = { 'action': 'edit', 'type': 'config', 'key': API_KEY, } if element_to_change == 'description': params['xpath'] = XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + url_filter_name + "']/" + element_to_change params['element'] = add_argument_open(element_value, 'description', False) result = http_request(URL, 'POST', body=params) url_filter_output['Description'] = element_value elif element_to_change == 'override_allow_list': prev_override_allow_list = argToList(url_filter_prev['allow-list']['member']) if add_remove_element == 'add': new_override_allow_list = list((set(prev_override_allow_list)).union(set([element_value]))) else: new_override_allow_list = [url for url in prev_override_allow_list if url != element_value] params['xpath'] = XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + url_filter_name + "']/allow-list" params['element'] = add_argument_list(new_override_allow_list, 'allow-list', True) result = http_request(URL, 'POST', body=params) url_filter_output[element_to_change] = new_override_allow_list # element_to_change == 'override_block_list' else: prev_override_block_list = 
argToList(url_filter_prev['block-list']['member']) if add_remove_element == 'add': new_override_block_list = list((set(prev_override_block_list)).union(set([element_value]))) else: new_override_block_list = [url for url in prev_override_block_list if url != element_value] params['xpath'] = XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + url_filter_name + "']/block-list" params['element'] = add_argument_list(new_override_block_list, 'block-list', True) result = http_request(URL, 'POST', body=params) url_filter_output[element_to_change] = new_override_block_list return result, url_filter_output def panorama_edit_url_filter_command(): """ Edit a URL Filter """ url_filter_name = demisto.args()['name'] element_to_change = demisto.args()['element_to_change'] add_remove_element = demisto.args()['add_remove_element'] element_value = demisto.args()['element_value'] result, url_filter_output = panorama_edit_url_filter(url_filter_name, element_to_change, element_value, add_remove_element) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'URL Filter was edited successfully.', 'EntryContext': { "Panorama.URLFilter(val.Name == obj.Name)": url_filter_output } }) @logger def panorama_delete_url_filter(url_filter_name): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "profiles/url-filtering/entry[@name='" + url_filter_name + "']", 'element': "<entry name='" + url_filter_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_url_filter_command(): """ Delete a custom url category """ url_filter_name = demisto.args()['name'] result = panorama_delete_url_filter(url_filter_name) url_filter_output = {'Name': url_filter_name} if DEVICE_GROUP: url_filter_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'URL Filter was deleted successfully.', 'EntryContext': { "Panorama.URLFilter(val.Name == obj.Name)": url_filter_output } }) ''' Security Rules Managing ''' def prettify_rule(rule): pretty_rule = { 'Name': rule['@name'], 'Action': rule['action'] } if DEVICE_GROUP: pretty_rule['DeviceGroup'] = DEVICE_GROUP if '@loc' in rule: pretty_rule['Location'] = rule['@loc'] if 'category' in rule and 'member' in rule['category']: pretty_rule['CustomUrlCategory'] = rule['category']['member'] if 'application' in rule and 'member' in rule['application']: pretty_rule['Application'] = rule['application']['member'] if 'destination' in rule and 'member' in rule['destination']: pretty_rule['Destination'] = rule['destination']['member'] if 'from' in rule and 'member' in rule['from']: pretty_rule['From'] = rule['from']['member'] if 'service' in rule and 'member' in rule['service']: pretty_rule['Service'] = rule['service']['member'] if 'to' in rule and 'member' in rule['to']: pretty_rule['To'] = rule['to']['member'] if 'source' in rule and 'member' in rule['source']: pretty_rule['Source'] = rule['source']['member'] if 'tag' in rule and 'member' in rule['tag']: pretty_rule['Tags'] = rule['tag']['member'] if 'log-setting' in rule and '#text' in rule['log-setting']: pretty_rule['LogForwardingProfile'] = rule['log-setting']['#text'] return pretty_rule def prettify_rules(rules): if not isinstance(rules, list): return prettify_rule(rules) pretty_rules_arr = [] for rule in rules: 
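# Note: the XML API returns a single dict when only one rule exists and a list of
# dicts when there are several; the isinstance check above normalizes the
# single-rule case so this loop always iterates over a list.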
pretty_rule = prettify_rule(rule) pretty_rules_arr.append(pretty_rule) return pretty_rules_arr @logger def panorama_list_rules(xpath: str, tag: str = None): params = { 'action': 'get', 'type': 'config', 'xpath': xpath, 'key': API_KEY } if tag: params['xpath'] += f'[( tag/member = \'{tag}\')]' result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_list_rules_command(): """ List security rules """ if DEVICE_GROUP: if 'pre_post' not in demisto.args(): raise Exception('Please provide the pre_post argument when listing rules in Panorama instance.') else: xpath = XPATH_SECURITY_RULES + demisto.args()['pre_post'] + '/security/rules/entry' else: xpath = XPATH_SECURITY_RULES tag = demisto.args().get('tag') rules = panorama_list_rules(xpath, tag) pretty_rules = prettify_rules(rules) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': rules, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Security Rules:', pretty_rules, ['Name', 'Location', 'Action', 'From', 'To', 'CustomUrlCategory', 'Service', 'Tags'], removeNull=True), 'EntryContext': { "Panorama.SecurityRule(val.Name == obj.Name)": pretty_rules } }) @logger def panorama_move_rule_command(): """ Move a security rule """ rulename = demisto.args()['rulename'] params = { 'type': 'config', 'action': 'move', 'key': API_KEY, 'where': demisto.args()['where'], } if DEVICE_GROUP: if 'pre_post' not in demisto.args(): raise Exception('Please provide the pre_post argument when moving a rule in Panorama instance.') else: params['xpath'] = XPATH_SECURITY_RULES + demisto.args()[ 'pre_post'] + '/security/rules/entry' + '[@name=\'' + rulename + '\']' else: params['xpath'] = XPATH_SECURITY_RULES + '[@name=\'' + rulename + '\']' if 'dst' in demisto.args(): params['dst'] = demisto.args()['dst'] result = http_request(URL, 'POST', body=params) rule_output = {'Name': rulename} if DEVICE_GROUP: rule_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Rule ' + rulename + ' moved successfully.', 'EntryContext': { "Panorama.SecurityRule(val.Name == obj.Name)": rule_output } }) ''' Security Rule Configuration ''' @logger def panorama_create_rule_command(): """ Create a security rule """ rulename = demisto.args()['rulename'] if 'rulename' in demisto.args() else ('demisto-' + (str(uuid.uuid4()))[:8]) source = argToList(demisto.args().get('source')) destination = argToList(demisto.args().get('destination')) negate_source = demisto.args().get('negate_source') negate_destination = demisto.args().get('negate_destination') action = demisto.args().get('action') service = demisto.args().get('service') disable = demisto.args().get('disable') categories = argToList(demisto.args().get('category')) application = argToList(demisto.args().get('application')) source_user = demisto.args().get('source_user') disable_server_response_inspection = demisto.args().get('disable_server_response_inspection') description = demisto.args().get('description') target = demisto.args().get('target') log_forwarding = demisto.args().get('log_forwarding', None) tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None if not DEVICE_GROUP: if target: raise Exception('The target argument is relevant only for a Palo Alto Panorama instance.') elif log_forwarding: raise Exception('The log_forwarding argument is relevant 
only for a Palo Alto Panorama instance.') params = prepare_security_rule_params(api_action='set', rulename=rulename, source=source, destination=destination, negate_source=negate_source, negate_destination=negate_destination, action=action, service=service, disable=disable, application=application, source_user=source_user, disable_server_response_inspection=disable_server_response_inspection, description=description, target=target, log_forwarding=log_forwarding, tags=tags, category=categories) result = http_request( URL, 'POST', body=params ) rule_output = {SECURITY_RULE_ARGS[key]: value for key, value in demisto.args().items() if key in SECURITY_RULE_ARGS} rule_output['Name'] = rulename if DEVICE_GROUP: rule_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Rule configured successfully.', 'EntryContext': { "Panorama.SecurityRule(val.Name == obj.Name)": rule_output } }) @logger def panorama_get_current_element(element_to_change: str, xpath: str) -> list: """ Get the current element value from """ params = { 'type': 'config', 'action': 'get', 'xpath': xpath, 'key': API_KEY } try: response = http_request(URL, 'GET', params=params) except PAN_OS_Not_Found: return [] result = response.get('response').get('result') if '@dirtyId' in result: raise Exception('Please commit the instance prior to editing the Security rule.') current_object = result.get(element_to_change) if 'list' in current_object: current_objects_items = argToList(current_object['list']['member']) elif 'member' in current_object: current_objects_items = argToList(current_object.get('member')) return current_objects_items @logger def panorama_edit_rule_items(rulename: str, element_to_change: str, element_value: List[str], behaviour: str): listable_elements = ['source', 'destination', 'application', 'category', 'source-user', 'service', 'tag'] if element_to_change not in listable_elements: raise Exception(f'Adding objects is only available for the following Objects types:{listable_elements}') if element_to_change == 'target' and not DEVICE_GROUP: raise Exception('The target argument is relevant only for a Palo Alto Panorama instance.') params = { 'type': 'config', 'action': 'edit', 'key': API_KEY } if DEVICE_GROUP: if 'pre_post' not in demisto.args(): raise Exception('please provide the pre_post argument when editing a rule in Panorama instance.') else: params['xpath'] = XPATH_SECURITY_RULES + demisto.args()[ 'pre_post'] + '/security/rules/entry' + '[@name=\'' + rulename + '\']' else: params['xpath'] = XPATH_SECURITY_RULES + '[@name=\'' + rulename + '\']' params['xpath'] += '/' + element_to_change current_objects_items = panorama_get_current_element(element_to_change, params['xpath']) if behaviour == 'add': values = list((set(current_objects_items)).union(set(element_value))) else: # remove values = [item for item in current_objects_items if item not in element_value] if not values: raise Exception(f'The object: {element_to_change} must have at least one item.') params['element'] = add_argument_list(values, element_to_change, True) result = http_request(URL, 'POST', body=params) rule_output = { 'Name': rulename, SECURITY_RULE_ARGS[element_to_change]: values } if DEVICE_GROUP: rule_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Rule edited 
successfully.', 'EntryContext': { "Panorama.SecurityRule(val.Name == obj.Name)": rule_output } }) @logger def panorama_edit_rule_command(): """ Edit a security rule """ rulename = demisto.args()['rulename'] element_to_change = demisto.args()['element_to_change'] if element_to_change == 'log-forwarding': element_to_change = 'log-setting' element_value = demisto.args()['element_value'] if element_to_change == 'target' and not DEVICE_GROUP: raise Exception('The target argument is relevant only for a Palo Alto Panorama instance.') behaviour = demisto.args().get('behaviour') if 'behaviour' in demisto.args() else 'replace' if behaviour != 'replace': panorama_edit_rule_items(rulename, element_to_change, argToList(element_value), behaviour) else: params = { 'type': 'config', 'action': 'edit', 'key': API_KEY } if element_to_change in ['action', 'description', 'log-setting']: params['element'] = add_argument_open(element_value, element_to_change, False) elif element_to_change in ['source', 'destination', 'application', 'category', 'source-user', 'service', 'tag']: element_value = argToList(element_value) params['element'] = add_argument_list(element_value, element_to_change, True) elif element_to_change == 'target': params['element'] = add_argument_target(element_value, 'target') else: params['element'] = add_argument_yes_no(element_value, element_to_change) if DEVICE_GROUP: if 'pre_post' not in demisto.args(): raise Exception('please provide the pre_post argument when editing a rule in Panorama instance.') else: params['xpath'] = XPATH_SECURITY_RULES + demisto.args()[ 'pre_post'] + '/security/rules/entry' + '[@name=\'' + rulename + '\']' else: params['xpath'] = XPATH_SECURITY_RULES + '[@name=\'' + rulename + '\']' params['xpath'] += '/' + element_to_change result = http_request(URL, 'POST', body=params) rule_output = { 'Name': rulename, SECURITY_RULE_ARGS[element_to_change]: element_value } if DEVICE_GROUP: rule_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Rule edited successfully.', 'EntryContext': { "Panorama.SecurityRule(val.Name == obj.Name)": rule_output } }) @logger def panorama_delete_rule_command(): """ Delete a security rule """ rulename = demisto.args()['rulename'] params = { 'type': 'config', 'action': 'delete', 'key': API_KEY } if DEVICE_GROUP: if 'pre_post' not in demisto.args(): raise Exception('Please provide the pre_post argument when moving a rule in Panorama instance.') else: params['xpath'] = XPATH_SECURITY_RULES + demisto.args()[ 'pre_post'] + '/security/rules/entry' + '[@name=\'' + rulename + '\']' else: params['xpath'] = XPATH_SECURITY_RULES + '[@name=\'' + rulename + '\']' result = http_request( URL, 'POST', body=params ) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Rule deleted successfully.', }) @logger def panorama_custom_block_rule_command(): """ Block an object in Panorama """ object_type = demisto.args()['object_type'] object_value = argToList(demisto.args()['object_value']) direction = demisto.args()['direction'] if 'direction' in demisto.args() else 'both' rulename = demisto.args()['rulename'] if 'rulename' in demisto.args() else ('demisto-' + (str(uuid.uuid4()))[:8]) block_destination = False if direction == 'from' else True block_source = False if direction == 'to' else True target = 
argToList(demisto.args().get('target')) if 'target' in demisto.args() else None log_forwarding = demisto.args().get('log_forwarding', None) tags = argToList(demisto.args()['tags']) if 'tags' in demisto.args() else None if not DEVICE_GROUP: if target: raise Exception('The target argument is relevant only for a Palo Alto Panorama instance.') elif log_forwarding: raise Exception('The log_forwarding argument is relevant only for a Palo Alto Panorama instance.') custom_block_output = { 'Name': rulename, 'Direction': direction, 'Disabled': False } if DEVICE_GROUP: custom_block_output['DeviceGroup'] = DEVICE_GROUP if log_forwarding: custom_block_output['LogForwarding'] = log_forwarding if target: custom_block_output['Target'] = target if tags: custom_block_output['Tags'] = tags if object_type == 'ip': if block_source: params = prepare_security_rule_params(api_action='set', action='drop', source=object_value, destination=['any'], rulename=rulename + '-from', target=target, log_forwarding=log_forwarding, tags=tags) result = http_request(URL, 'POST', body=params) if block_destination: params = prepare_security_rule_params(api_action='set', action='drop', destination=object_value, source=['any'], rulename=rulename + '-to', target=target, log_forwarding=log_forwarding, tags=tags) result = http_request(URL, 'POST', body=params) custom_block_output['IP'] = object_value elif object_type in ['address-group', 'edl']: if block_source: params = prepare_security_rule_params(api_action='set', action='drop', source=object_value, destination=['any'], rulename=rulename + '-from', target=target, log_forwarding=log_forwarding, tags=tags) result = http_request(URL, 'POST', body=params) if block_destination: params = prepare_security_rule_params(api_action='set', action='drop', destination=object_value, source=['any'], rulename=rulename + '-to', target=target, log_forwarding=log_forwarding, tags=tags) result = http_request(URL, 'POST', body=params) custom_block_output['AddressGroup'] = object_value elif object_type == 'url-category': params = prepare_security_rule_params(api_action='set', action='drop', source=['any'], destination=['any'], category=object_value, rulename=rulename, target=target, log_forwarding=log_forwarding, tags=tags) result = http_request(URL, 'POST', body=params) custom_block_output['CustomURLCategory'] = object_value elif object_type == 'application': params = prepare_security_rule_params(api_action='set', action='drop', source=['any'], destination=['any'], application=object_value, rulename=rulename, target=target, log_forwarding=log_forwarding, tags=tags) result = http_request(URL, 'POST', body=params) custom_block_output['Application'] = object_value demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Object was blocked successfully.', 'EntryContext': { "Panorama.SecurityRule(val.Name == obj.Name)": custom_block_output } }) ''' PCAPS ''' @logger def panorama_list_pcaps_command(): """ Get list of pcap files """ if DEVICE_GROUP: raise Exception('PCAP listing is only supported on Firewall (not Panorama).') pcap_type = demisto.args()['pcapType'] params = { 'type': 'export', 'key': API_KEY, 'category': pcap_type } if 'password' in demisto.args(): params['dlp-password'] = demisto.args()['password'] elif demisto.args()['pcapType'] == 'dlp-pcap': raise Exception('can not provide dlp-pcap without password') result = http_request(URL, 'GET', params=params) json_result = 
json.loads(xml2json(result.text))['response'] if json_result['@status'] != 'success': raise Exception('Request to get list of Pcaps failed.\nStatus code: ' + str( json_result['response']['@code']) + '\nWith message: ' + str(json_result['response']['msg']['line'])) dir_listing = json_result['result']['dir-listing'] if 'file' not in dir_listing: demisto.results(f'PAN-OS has no Pcaps of type: {pcap_type}.') else: pcaps = dir_listing['file'] pcap_list = [pcap[1:] for pcap in pcaps] demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': json_result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('List of Pcaps:', pcap_list, ['Pcap name']), 'EntryContext': { "Panorama.Pcaps(val.Name == obj.Name)": pcap_list } }) def validate_search_time(search_time: str) -> str: """ Validate search_time is of format YYYY/MM/DD HH:MM:SS or YYYY/MM/DD and pad with zeroes """ try: datetime.strptime(search_time, '%Y/%m/%d') search_time += ' 00:00:00' return search_time except ValueError: pass try: datetime.strptime(search_time, '%Y/%m/%d %H:%M:%S') return search_time except ValueError as err: raise ValueError(f"Incorrect data format. searchTime should be of: YYYY/MM/DD HH:MM:SS or YYYY/MM/DD.\n" f"Error is: {str(err)}") @logger def panorama_get_pcap_command(): """ Get pcap file """ if DEVICE_GROUP: raise Exception('Getting a PCAP file is only supported on Firewall (not Panorama).') pcap_type = demisto.args()['pcapType'] params = { 'type': 'export', 'key': API_KEY, 'category': pcap_type } password = demisto.args().get('password') pcap_id = demisto.args().get('pcapID') search_time = demisto.args().get('searchTime') if pcap_type == 'dlp-pcap' and not password: raise Exception('Cannot provide dlp-pcap without a password.') elif password: params['dlp-password'] = password if pcap_type == 'threat-pcap' and (not pcap_id or not search_time): raise Exception('Cannot provide threat-pcap without the pcapID and searchTime arguments.') pcap_name = demisto.args().get('from') local_name = demisto.args().get('localName') serial_no = demisto.args().get('serialNo') file_name = None if pcap_id: params['pcap-id'] = pcap_id if pcap_name: params['from'] = pcap_name file_name = pcap_name if local_name: params['to'] = local_name file_name = local_name if serial_no: params['serialno'] = serial_no if search_time: search_time = validate_search_time(search_time) params['search-time'] = search_time # set file name to the current time if from/to were not specified if not file_name: file_name = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S') result = http_request(URL, 'GET', params=params) # due to the pcap file size limitation in the product; for more details, see the documentation. if result.headers['Content-Type'] != 'application/octet-stream': raise Exception( 'PCAP download failed.
Most likely cause is the file size limitation.\n' 'For information on how to download manually, see the documentation for this integration.') file = fileResult(file_name + ".pcap", result.content) demisto.results(file) ''' Applications ''' def prettify_applications_arr(applications_arr): pretty_application_arr = [] if not isinstance(applications_arr, list): applications_arr = [applications_arr] for i in range(len(applications_arr)): application = applications_arr[i] pretty_application_arr.append({ 'SubCategory': application.get('subcategory'), 'Risk': application.get('risk'), 'Technology': application.get('technology'), 'Name': application.get('@name'), 'Description': application.get('description'), 'Id': application.get('@id'), }) return pretty_application_arr @logger def panorama_list_applications(predefined: bool): major_version = get_pan_os_major_version() params = { 'type': 'config', 'action': 'get', 'key': API_KEY } if predefined: if major_version < 9: raise Exception('Listing predefined applications is only available for PAN-OS 9.X and above versions.') else: params['xpath'] = '/config/predefined/application' else: params['xpath'] = XPATH_OBJECTS + "application/entry" result = http_request( URL, 'POST', body=params ) applications = result['response']['result'] if predefined: application_arr = applications.get('application', {}).get('entry') else: if major_version < 9: application_arr = applications.get('entry') else: application_arr = applications.get('application') return application_arr def panorama_list_applications_command(): """ List all applications """ predefined = str(demisto.args().get('predefined', '')) == 'true' applications_arr = panorama_list_applications(predefined) applications_arr_output = prettify_applications_arr(applications_arr) headers = ['Id', 'Name', 'Risk', 'Category', 'SubCategory', 'Technology', 'Description'] demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': applications_arr, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Applications', t=applications_arr_output, headers=headers), 'EntryContext': { "Panorama.Applications(val.Name == obj.Name)": applications_arr_output } }) ''' External Dynamic Lists Commands ''' def prettify_edls_arr(edls_arr): pretty_edls_arr = [] if not isinstance(edls_arr, list): # handle case of only one edl in the instance return prettify_edl(edls_arr) for edl in edls_arr: pretty_edl = { 'Name': edl['@name'], 'Type': ''.join(edl['type'].keys()) } edl_type = pretty_edl['Type'] if edl['type'][edl_type]: if 'url' in edl['type'][edl_type]: pretty_edl['URL'] = edl['type'][edl_type]['url'] if 'certificate-profile' in edl['type'][edl_type]: pretty_edl['CertificateProfile'] = edl['type'][edl_type]['certificate-profile'] if 'recurring' in edl['type'][edl_type]: pretty_edl['Recurring'] = ''.join(edl['type'][edl_type]['recurring'].keys()) if 'description' in edl['type'][edl_type]: pretty_edl['Description'] = edl['type'][edl_type]['description'] if DEVICE_GROUP: pretty_edl['DeviceGroup'] = DEVICE_GROUP pretty_edls_arr.append(pretty_edl) return pretty_edls_arr @logger def panorama_list_edls(): params = { 'action': 'get', 'type': 'config', 'xpath': XPATH_OBJECTS + "external-list/entry", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_list_edls_command(): """ Get all EDLs """ edls_arr = panorama_list_edls() edls_output = prettify_edls_arr(edls_arr) demisto.results({ 'Type': entryTypes['note'], 
'ContentsFormat': formats['json'], 'Contents': edls_arr, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('External Dynamic Lists:', edls_output, ['Name', 'Type', 'URL', 'Recurring', 'CertificateProfile', 'Description'], removeNull=True), 'EntryContext': { "Panorama.EDL(val.Name == obj.Name)": edls_output } }) def prettify_edl(edl): pretty_edl = { 'Name': edl['@name'], 'Type': ''.join(edl['type'].keys()) } edl_type = pretty_edl['Type'] if edl['type'][edl_type]: if 'url' in edl['type'][edl_type]: pretty_edl['URL'] = edl['type'][edl_type]['url'] if 'certificate-profile' in edl['type'][edl_type]: pretty_edl['CertificateProfile'] = edl['type'][edl_type]['certificate-profile'] if 'recurring' in edl['type'][edl_type]: pretty_edl['Recurring'] = ''.join(edl['type'][edl_type]['recurring'].keys()) if 'description' in edl['type'][edl_type]: pretty_edl['Description'] = edl['type'][edl_type]['description'] if DEVICE_GROUP: pretty_edl['DeviceGroup'] = DEVICE_GROUP return pretty_edl @logger def panorama_get_edl(edl_name): params = { 'action': 'show', 'type': 'config', 'xpath': XPATH_OBJECTS + "external-list/entry[@name='" + edl_name + "']", 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result['response']['result']['entry'] def panorama_get_edl_command(): """ Get an EDL """ edl_name = demisto.args()['name'] edl = panorama_get_edl(edl_name) edl_output = prettify_edl(edl) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': edl, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('External Dynamic List:', edl_output, ['Name', 'Type', 'URL', 'Recurring', 'CertificateProfile', 'Description'], None, True), 'EntryContext': { "Panorama.EDL(val.Name == obj.Name)": edl_output } }) @logger def panorama_create_edl(edl_name, url, type_, recurring, certificate_profile=None, description=None): params = { 'action': 'set', 'type': 'config', 'xpath': XPATH_OBJECTS + "external-list/entry[@name='" + edl_name + "']/type/" + type_, 'key': API_KEY } params['element'] = add_argument(url, 'url', False) + '<recurring><' + recurring + '/></recurring>' + add_argument( certificate_profile, 'certificate-profile', False) + add_argument(description, 'description', False) result = http_request( URL, 'POST', body=params, ) return result def panorama_create_edl_command(): """ Create an edl object """ edl_name = demisto.args().get('name') url = demisto.args().get('url').replace(' ', '%20') type_ = demisto.args().get('type') recurring = demisto.args().get('recurring') certificate_profile = demisto.args().get('certificate_profile') description = demisto.args().get('description') edl = panorama_create_edl(edl_name, url, type_, recurring, certificate_profile, description) edl_output = { 'Name': edl_name, 'URL': url, 'Type': type_, 'Recurring': recurring } if DEVICE_GROUP: edl_output['DeviceGroup'] = DEVICE_GROUP if description: edl_output['Description'] = description if certificate_profile: edl_output['CertificateProfile'] = certificate_profile demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': edl, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'External Dynamic List was created successfully.', 'EntryContext': { "Panorama.EDL(val.Name == obj.Name)": edl_output } }) @logger def panorama_edit_edl(edl_name, element_to_change, element_value): edl_prev = panorama_get_edl(edl_name) if '@dirtyId' in edl_prev: raise Exception('Please commit the instance prior to editing 
the External Dynamic List') edl_type = ''.join(edl_prev['type'].keys()) edl_output = {'Name': edl_name} if DEVICE_GROUP: edl_output['DeviceGroup'] = DEVICE_GROUP params = {'action': 'edit', 'type': 'config', 'key': API_KEY, 'xpath': XPATH_OBJECTS + "external-list/entry[@name='" + edl_name + "']/type/" + edl_type + "/" + element_to_change} if element_to_change == 'url': params['element'] = add_argument_open(element_value, 'url', False) result = http_request(URL, 'POST', body=params) edl_output['URL'] = element_value elif element_to_change == 'certificate_profile': params['element'] = add_argument_open(element_value, 'certificate-profile', False) result = http_request(URL, 'POST', body=params) edl_output['CertificateProfile'] = element_value elif element_to_change == 'description': params['element'] = add_argument_open(element_value, 'description', False) result = http_request(URL, 'POST', body=params) edl_output['Description'] = element_value # element_to_change == 'recurring' else: if element_value not in ['five-minute', 'hourly']: raise Exception('Recurring segment must be five-minute or hourly') params['element'] = '<recurring><' + element_value + '/></recurring>' result = http_request(URL, 'POST', body=params) edl_output['Recurring'] = element_value return result, edl_output def panorama_edit_edl_command(): """ Edit an EDL """ edl_name = demisto.args()['name'] element_to_change = demisto.args()['element_to_change'] element_value = demisto.args()['element_value'] result, edl_output = panorama_edit_edl(edl_name, element_to_change, element_value) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'External Dynamic List was edited successfully', 'EntryContext': { "Panorama.EDL(val.Name == obj.Name)": edl_output } }) @logger def panorama_delete_edl(edl_name): params = { 'action': 'delete', 'type': 'config', 'xpath': XPATH_OBJECTS + "external-list/entry[@name='" + edl_name + "']", 'element': "<entry name='" + edl_name + "'></entry>", 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_delete_edl_command(): """ Delete an EDL """ edl_name = demisto.args()['name'] edl = panorama_delete_edl(edl_name) edl_output = {'Name': edl_name} if DEVICE_GROUP: edl_output['DeviceGroup'] = DEVICE_GROUP demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': edl, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'External Dynamic List was deleted successfully', 'EntryContext': { "Panorama.EDL(val.Name == obj.Name)": edl_output } }) def panorama_refresh_edl(edl_name): edl = panorama_get_edl(edl_name) edl_type = ''.join(edl['type'].keys()) params = { 'type': 'op', 'cmd': '<request><system><external-list><refresh><type><' + edl_type + '><name>' + edl_name + '</name></' + edl_type + '></type></refresh></external-list></system></request>', 'key': API_KEY } result = http_request( URL, 'POST', body=params, ) return result def panorama_refresh_edl_command(): """ Refresh an EDL """ if DEVICE_GROUP: raise Exception('EDL refresh is only supported on Firewall (not Panorama).') edl_name = demisto.args()['name'] result = panorama_refresh_edl(edl_name) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['text'], 'HumanReadable': 'Refreshed External Dynamic List successfully', }) ''' IP Tags ''' @logger def panorama_register_ip_tag(tag: str, ips: 
List, persistent: str):
    entry: str = ''
    for ip in ips:
        entry += f'<entry ip=\"{ip}\" persistent=\"{persistent}\"><tag><member>{tag}</member></tag></entry>'

    params = {
        'type': 'user-id',
        'cmd': '<uid-message><version>2.0</version><type>update</type><payload><register>' + entry
               + '</register></payload></uid-message>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params,
    )
    return result


def panorama_register_ip_tag_command():
    """
    Register IPs to a Tag
    """
    tag = demisto.args()['tag']
    ips = argToList(demisto.args()['IPs'])
    persistent = demisto.args()['persistent'] if 'persistent' in demisto.args() else 'true'
    persistent = '1' if persistent == 'true' else '0'

    result = panorama_register_ip_tag(tag, ips, str(persistent))

    registered_ip: Dict[str, str] = {}
    # update the context only if the IPs are persistent
    if persistent == '1':
        # get the existing IPs for this tag
        context_ips = demisto.dt(demisto.context(), 'Panorama.DynamicTags(val.Tag ==\"' + tag + '\").IPs')
        if context_ips:
            all_ips = ips + context_ips
        else:
            all_ips = ips
        registered_ip = {
            'Tag': tag,
            'IPs': all_ips
        }
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Registered ip-tag successfully',
        'EntryContext': {
            "Panorama.DynamicTags(val.Tag == obj.Tag)": registered_ip
        }
    })


@logger
def panorama_unregister_ip_tag(tag: str, ips: list):
    entry = ''
    for ip in ips:
        entry += '<entry ip=\"' + ip + '\"><tag><member>' + tag + '</member></tag></entry>'

    params = {
        'type': 'user-id',
        'cmd': '<uid-message><version>2.0</version><type>update</type><payload><unregister>' + entry
               + '</unregister></payload></uid-message>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params,
    )
    return result


def panorama_unregister_ip_tag_command():
    """
    Unregister IPs from a Tag
    """
    tag = demisto.args()['tag']
    ips = argToList(demisto.args()['IPs'])

    result = panorama_unregister_ip_tag(tag, ips)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Unregistered ip-tag successfully'
    })


''' User Tags '''


@logger
def panorama_register_user_tag(tag: str, users: List):
    entry: str = ''
    for user in users:
        entry += f'<entry user=\"{user}\"><tag><member>{tag}</member></tag></entry>'

    params = {
        'type': 'user-id',
        'cmd': f'<uid-message><version>2.0</version><type>update</type><payload><register-user>{entry}'
               f'</register-user></payload></uid-message>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params,
    )
    return result


def panorama_register_user_tag_command():
    """
    Register Users to a Tag
    """
    major_version = get_pan_os_major_version()
    if major_version <= 8:
        raise Exception('The panorama-register-user-tag command is only available for PAN-OS 9.X and above versions.')
    tag = demisto.args()['tag']
    users = argToList(demisto.args()['Users'])

    result = panorama_register_user_tag(tag, users)

    # get the existing Users for this tag
    context_users = demisto.dt(demisto.context(), 'Panorama.DynamicTags(val.Tag ==\"' + tag + '\").Users')
    if context_users:
        all_users = users + context_users
    else:
        all_users = users
    registered_user = {
        'Tag': tag,
        'Users': all_users
    }
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Registered user-tag successfully',
        'EntryContext': {
            "Panorama.DynamicTags(val.Tag == obj.Tag)": registered_user
        }
    })
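
# For reference, the User-ID API payload built above for a hypothetical call
# panorama_register_user_tag('suspicious', ['user1']) would look like:
#   <uid-message><version>2.0</version><type>update</type><payload><register-user>
#     <entry user="user1"><tag><member>suspicious</member></tag></entry>
#   </register-user></payload></uid-message>
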
@logger
def panorama_unregister_user_tag(tag: str, users: list):
    entry = ''
    for user in users:
        entry += f'<entry user=\"{user}\"><tag><member>{tag}</member></tag></entry>'

    params = {
        'type': 'user-id',
        'cmd': f'<uid-message><version>2.0</version><type>update</type><payload><unregister-user>{entry}'
               f'</unregister-user></payload></uid-message>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params,
    )
    return result


def panorama_unregister_user_tag_command():
    """
    Unregister Users from a Tag
    """
    major_version = get_pan_os_major_version()
    if major_version <= 8:
        raise Exception('The panorama-unregister-user-tag command is only available for PAN-OS 9.X and above versions.')
    tag = demisto.args()['tag']
    users = argToList(demisto.args()['Users'])

    result = panorama_unregister_user_tag(tag, users)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'Unregistered user-tag successfully'
    })


''' Traffic Logs '''


def build_traffic_logs_query(source=None, destination=None, receive_time=None,
                             application=None, to_port=None, action=None):
    query = ''
    if source and len(source) > 0:
        query += '(addr.src in ' + source + ')'
    if destination and len(destination) > 0:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += '(addr.dst in ' + destination + ')'
    if receive_time and len(receive_time) > 0:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += '(receive_time geq ' + receive_time + ')'
    if application and len(application) > 0:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += '(app eq ' + application + ')'
    if to_port and len(to_port) > 0:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += '(port.dst eq ' + to_port + ')'
    if action and len(action) > 0:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += '(action eq ' + action + ')'
    return query


@logger
def panorama_query_traffic_logs(number_of_logs, direction, query,
                                source, destination, receive_time, application, to_port, action):
    params = {
        'type': 'log',
        'log-type': 'traffic',
        'key': API_KEY
    }
    if query and len(query) > 0:
        params['query'] = query
    else:
        params['query'] = build_traffic_logs_query(source, destination, receive_time, application, to_port, action)
    if number_of_logs:
        params['nlogs'] = number_of_logs
    if direction:
        params['dir'] = direction
    result = http_request(
        URL,
        'GET',
        params=params,
    )
    return result


def panorama_query_traffic_logs_command():
    """
    Query the traffic logs
    """
    number_of_logs = demisto.args().get('number_of_logs')
    direction = demisto.args().get('direction')
    query = demisto.args().get('query')
    source = demisto.args().get('source')
    destination = demisto.args().get('destination')
    receive_time = demisto.args().get('receive_time')
    application = demisto.args().get('application')
    to_port = demisto.args().get('to_port')
    action = demisto.args().get('action')

    if query and (source or destination or receive_time or application or to_port or action):
        raise Exception('Use the query argument or the '
                        'source, destination, receive_time, application, to_port, action arguments to build your query')

    result = panorama_query_traffic_logs(number_of_logs, direction, query,
                                         source, destination, receive_time, application, to_port, action)

    if result['response']['@status'] == 'error':
        if 'msg' in result['response'] and 'line' in result['response']['msg']:
            message = '. 
Reason is: ' + result['response']['msg']['line'] raise Exception('Query traffic logs failed' + message) else: raise Exception('Query traffic logs failed.') if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']: raise Exception('Missing JobID in response.') query_traffic_output = { 'JobID': result['response']['result']['job'], 'Status': 'Pending' } demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Query Traffic Logs:', query_traffic_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': {"Panorama.TrafficLogs(val.JobID == obj.JobID)": query_traffic_output} }) @logger def panorama_get_traffic_logs(job_id): params = { 'action': 'get', 'type': 'log', 'job-id': job_id, 'key': API_KEY } result = http_request( URL, 'GET', params=params, ) return result def panorama_check_traffic_logs_status_command(): job_id = demisto.args().get('job_id') result = panorama_get_traffic_logs(job_id) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query traffic logs failed' + message) else: raise Exception('Query traffic logs failed.') query_traffic_status_output = { 'JobID': job_id, 'Status': 'Pending' } if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \ or 'status' not in result['response']['result']['job']: raise Exception('Missing JobID status in response.') if result['response']['result']['job']['status'] == 'FIN': query_traffic_status_output['Status'] = 'Completed' demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Query Traffic Logs status:', query_traffic_status_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': {"Panorama.TrafficLogs(val.JobID == obj.JobID)": query_traffic_status_output} }) def prettify_traffic_logs(traffic_logs): pretty_traffic_logs_arr = [] for traffic_log in traffic_logs: pretty_traffic_log = {} if 'action' in traffic_log: pretty_traffic_log['Action'] = traffic_log['action'] if 'action_source' in traffic_log: pretty_traffic_log['ActionSource'] = traffic_log['action_source'] if 'application' in traffic_log: pretty_traffic_log['Application'] = traffic_log['application'] if 'category' in traffic_log: pretty_traffic_log['Category'] = traffic_log['category'] if 'device_name' in traffic_log: pretty_traffic_log['DeviceName'] = traffic_log['device_name'] if 'dst' in traffic_log: pretty_traffic_log['Destination'] = traffic_log['dst'] if 'dport' in traffic_log: pretty_traffic_log['DestinationPort'] = traffic_log['dport'] if 'from' in traffic_log: pretty_traffic_log['FromZone'] = traffic_log['from'] if 'proto' in traffic_log: pretty_traffic_log['Protocol'] = traffic_log['proto'] if 'rule' in traffic_log: pretty_traffic_log['Rule'] = traffic_log['rule'] if 'receive_time' in traffic_log: pretty_traffic_log['ReceiveTime'] = traffic_log['receive_time'] if 'session_end_reason' in traffic_log: pretty_traffic_log['SessionEndReason'] = traffic_log['session_end_reason'] if 'src' in traffic_log: pretty_traffic_log['Source'] = traffic_log['src'] if 'sport' in traffic_log: pretty_traffic_log['SourcePort'] = traffic_log['sport'] if 'start' in traffic_log: 
pretty_traffic_log['StartTime'] = traffic_log['start']
        if 'to' in traffic_log:
            pretty_traffic_log['ToZone'] = traffic_log['to']

        pretty_traffic_logs_arr.append(pretty_traffic_log)
    return pretty_traffic_logs_arr


def panorama_get_traffic_logs_command():
    job_id = demisto.args().get('job_id')
    result = panorama_get_traffic_logs(job_id)

    if result['response']['@status'] == 'error':
        if 'msg' in result['response'] and 'line' in result['response']['msg']:
            message = '. Reason is: ' + result['response']['msg']['line']
            raise Exception('Query traffic logs failed' + message)
        else:
            raise Exception('Query traffic logs failed.')

    query_traffic_logs_output = {
        'JobID': job_id,
        'Status': 'Pending'
    }

    if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \
            or 'status' not in result['response']['result']['job']:
        raise Exception('Missing JobID status in response.')

    if result['response']['result']['job']['status'] != 'FIN':
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': result,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Query Traffic Logs status:', query_traffic_logs_output,
                                             ['JobID', 'Status'], removeNull=True),
            'EntryContext': {"Panorama.TrafficLogs(val.JobID == obj.JobID)": query_traffic_logs_output}
        })
    else:  # FIN
        query_traffic_logs_output['Status'] = 'Completed'
        if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response']['result'] \
                or 'logs' not in result['response']['result']['log']:
            raise Exception('Missing logs in response.')

        logs = result['response']['result']['log']['logs']
        if logs['@count'] == '0':
            demisto.results('No traffic logs matched the query')
        else:
            pretty_traffic_logs = prettify_traffic_logs(logs['entry'])
            query_traffic_logs_output['Logs'] = pretty_traffic_logs
            demisto.results({
                'Type': entryTypes['note'],
                'ContentsFormat': formats['json'],
                'Contents': result,
                'ReadableContentsFormat': formats['markdown'],
                'HumanReadable': tableToMarkdown('Query Traffic Logs:', pretty_traffic_logs,
                                                 ['JobID', 'Source', 'SourcePort', 'Destination', 'DestinationPort',
                                                  'Application', 'Action'], removeNull=True),
                'EntryContext': {"Panorama.TrafficLogs(val.JobID == obj.JobID)": query_traffic_logs_output}
            })


''' Logs '''


def build_array_query(query, arg_string, string, operator):
    list_string = argToList(arg_string)
    list_string_length = len(list_string)
    if list_string_length > 1:
        query += '('
    for i, item in enumerate(list_string):
        query += f'({string} {operator} \'{item}\')'
        if i < list_string_length - 1:
            query += ' or '
    if list_string_length > 1:
        query += ')'
    return query


def build_logs_query(address_src=None, address_dst=None, ip_=None,
                     zone_src=None, zone_dst=None, time_generated=None, action=None,
                     port_dst=None, rule=None, url=None, filedigest=None):
    query = ''
    if address_src:
        query = build_array_query(query, address_src, 'addr.src', 'in')
    if address_dst:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query = build_array_query(query, address_dst, 'addr.dst', 'in')
    if ip_:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query = build_array_query(query, ip_, 'addr.src', 'in')
        query += ' or '
        query = build_array_query(query, ip_, 'addr.dst', 'in')
    if zone_src:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query = build_array_query(query, zone_src, 'zone.src', 'eq')
    if zone_dst:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query = build_array_query(query, zone_dst, 'zone.dst', 'eq')
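    # Each populated argument appends one more "(field operator 'value')" clause, ANDed onto the
    # query built so far. As a hypothetical example, address_src='10.0.0.1' with zone_src='trust'
    # yields:
    #   (addr.src in '10.0.0.1') and (zone.src eq 'trust')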
    if port_dst:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query = build_array_query(query, port_dst, 'port.dst', 'eq')
    if time_generated:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query += '(time_generated leq ' + time_generated + ')'
    if action:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query = build_array_query(query, action, 'action', 'eq')
    if rule:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query = build_array_query(query, rule, 'rule', 'eq')
    if url:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query = build_array_query(query, url, 'url', 'contains')
    if filedigest:
        if len(query) > 0 and query[-1] == ')':
            query += ' and '
        query = build_array_query(query, filedigest, 'filedigest', 'eq')

    return query


@logger
def panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
                        zone_src, zone_dst, time_generated, action,
                        port_dst, rule, url, filedigest):
    params = {
        'type': 'log',
        'log-type': log_type,
        'key': API_KEY
    }

    if filedigest and log_type != 'wildfire':
        raise Exception('The filedigest argument is only relevant to wildfire log type.')
    if url and log_type == 'traffic':
        raise Exception('The url argument is not relevant to traffic log type.')

    if query:
        params['query'] = query
    else:
        if ip_ and (address_src or address_dst):
            raise Exception('The ip argument cannot be used with the address-source or the address-destination arguments.')
        params['query'] = build_logs_query(address_src, address_dst, ip_,
                                           zone_src, zone_dst, time_generated, action,
                                           port_dst, rule, url, filedigest)
    if number_of_logs:
        params['nlogs'] = number_of_logs

    result = http_request(
        URL,
        'GET',
        params=params,
    )
    return result


def panorama_query_logs_command():
    """
    Query logs
    """
    log_type = demisto.args().get('log-type')
    number_of_logs = demisto.args().get('number_of_logs')
    query = demisto.args().get('query')
    address_src = demisto.args().get('addr-src')
    address_dst = demisto.args().get('addr-dst')
    ip_ = demisto.args().get('ip')
    zone_src = demisto.args().get('zone-src')
    zone_dst = demisto.args().get('zone-dst')
    time_generated = demisto.args().get('time-generated')
    action = demisto.args().get('action')
    port_dst = demisto.args().get('port-dst')
    rule = demisto.args().get('rule')
    filedigest = demisto.args().get('filedigest')
    url = demisto.args().get('url')
    if url and url[-1] != '/':
        url += '/'

    if query and (address_src or address_dst or zone_src or zone_dst
                  or time_generated or action or port_dst or rule or url or filedigest):
        raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.')

    result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
                                 zone_src, zone_dst, time_generated, action,
                                 port_dst, rule, url, filedigest)

    if result['response']['@status'] == 'error':
        if 'msg' in result['response'] and 'line' in result['response']['msg']:
            message = '. 
Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']: raise Exception('Missing JobID in response.') query_logs_output = { 'JobID': result['response']['result']['job'], 'Status': 'Pending', 'LogType': log_type, 'Message': result['response']['result']['msg']['line'] } demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output} }) def panorama_check_logs_status_command(): """ Check query logs status """ job_ids = argToList(demisto.args().get('job_id')) for job_id in job_ids: result = panorama_get_traffic_logs(job_id) if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') query_logs_status_output = { 'JobID': job_id, 'Status': 'Pending' } if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \ or 'status' not in result['response']['result']['job']: raise Exception('Missing JobID status in response.') if result['response']['result']['job']['status'] == 'FIN': query_logs_status_output['Status'] = 'Completed' demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Query Logs status:', query_logs_status_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_status_output} }) def prettify_log(log): pretty_log = {} if 'action' in log: pretty_log['Action'] = log['action'] if 'app' in log: pretty_log['Application'] = log['app'] if 'category' in log: pretty_log['CategoryOrVerdict'] = log['category'] if 'device_name' in log: pretty_log['DeviceName'] = log['device_name'] if 'dst' in log: pretty_log['DestinationAddress'] = log['dst'] if 'dstuser' in log: pretty_log['DestinationUser'] = log['dstuser'] if 'dstloc' in log: pretty_log['DestinationCountry'] = log['dstloc'] if 'dport' in log: pretty_log['DestinationPort'] = log['dport'] if 'filedigest' in log: pretty_log['FileDigest'] = log['filedigest'] if 'filename' in log: pretty_log['FileName'] = log['filename'] if 'filetype' in log: pretty_log['FileType'] = log['filetype'] if 'from' in log: pretty_log['FromZone'] = log['from'] if 'misc' in log: pretty_log['URLOrFilename'] = log['misc'] if 'natdst' in log: pretty_log['NATDestinationIP'] = log['natdst'] if 'natdport' in log: pretty_log['NATDestinationPort'] = log['natdport'] if 'natsrc' in log: pretty_log['NATSourceIP'] = log['natsrc'] if 'natsport' in log: pretty_log['NATSourcePort'] = log['natsport'] if 'pcap_id' in log: pretty_log['PCAPid'] = log['pcap_id'] if 'proto' in log: pretty_log['IPProtocol'] = log['proto'] if 'recipient' in log: pretty_log['Recipient'] = log['recipient'] if 'rule' in log: pretty_log['Rule'] = log['rule'] if 'rule_uuid' in log: pretty_log['RuleID'] = log['rule_uuid'] if 'receive_time' in log: pretty_log['ReceiveTime'] = log['receive_time'] if 'sender' in log: 
pretty_log['Sender'] = log['sender'] if 'sessionid' in log: pretty_log['SessionID'] = log['sessionid'] if 'serial' in log: pretty_log['DeviceSN'] = log['serial'] if 'severity' in log: pretty_log['Severity'] = log['severity'] if 'src' in log: pretty_log['SourceAddress'] = log['src'] if 'srcloc' in log: pretty_log['SourceCountry'] = log['srcloc'] if 'srcuser' in log: pretty_log['SourceUser'] = log['srcuser'] if 'sport' in log: pretty_log['SourcePort'] = log['sport'] if 'thr_category' in log: pretty_log['ThreatCategory'] = log['thr_category'] if 'threatid' in log: pretty_log['Name'] = log['threatid'] if 'tid' in log: pretty_log['ID'] = log['tid'] if 'to' in log: pretty_log['ToZone'] = log['to'] if 'time_generated' in log: pretty_log['TimeGenerated'] = log['time_generated'] if 'url_category_list' in log: pretty_log['URLCategoryList'] = log['url_category_list'] return pretty_log def prettify_logs(logs): if not isinstance(logs, list): # handle case of only one log that matched the query return prettify_log(logs) pretty_logs_arr = [] for log in logs: pretty_log = prettify_log(log) pretty_logs_arr.append(pretty_log) return pretty_logs_arr def panorama_get_logs_command(): ignore_auto_extract = demisto.args().get('ignore_auto_extract') == 'true' job_ids = argToList(demisto.args().get('job_id')) for job_id in job_ids: result = panorama_get_traffic_logs(job_id) log_type_dt = demisto.dt(demisto.context(), f'Panorama.Monitor(val.JobID === "{job_id}").LogType') if isinstance(log_type_dt, list): log_type = log_type_dt[0] else: log_type = log_type_dt if result['response']['@status'] == 'error': if 'msg' in result['response'] and 'line' in result['response']['msg']: message = '. Reason is: ' + result['response']['msg']['line'] raise Exception('Query logs failed' + message) else: raise Exception('Query logs failed.') query_logs_output = { 'JobID': job_id, 'Status': 'Pending' } if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \ or 'status' not in result['response']['result']['job']: raise Exception('Missing JobID status in response.') if result['response']['result']['job']['status'] != 'FIN': demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': tableToMarkdown('Query Logs status:', query_logs_output, ['JobID', 'Status'], removeNull=True), 'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output} }) else: # FIN query_logs_output['Status'] = 'Completed' if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response']['result'] \ or 'logs' not in result['response']['result']['log']: raise Exception('Missing logs in response.') logs = result['response']['result']['log']['logs'] if logs['@count'] == '0': human_readable = f'No {log_type} logs matched the query.' 
else: pretty_logs = prettify_logs(logs['entry']) query_logs_output['Logs'] = pretty_logs human_readable = tableToMarkdown('Query ' + log_type + ' Logs:', query_logs_output['Logs'], ['TimeGenerated', 'SourceAddress', 'DestinationAddress', 'Application', 'Action', 'Rule', 'URLOrFilename'], removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'IgnoreAutoExtract': ignore_auto_extract, 'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output} }) ''' Security Policy Match''' def build_policy_match_query(application=None, category=None, destination=None, destination_port=None, from_=None, to_=None, protocol=None, source=None, source_user=None): query = '<test><security-policy-match>' if from_: query += f'<from>{from_}</from>' if to_: query += f'<to>{to_}</to>' if source: query += f'<source>{source}</source>' if destination: query += f'<destination>{destination}</destination>' if destination_port: query += f'<destination-port>{destination_port}</destination-port>' if protocol: query += f'<protocol>{protocol}</protocol>' if source_user: query += f'<source-user>{source_user}</source-user>' if application: query += f'<application>{application}</application>' if category: query += f'<category>{category}</category>' query += '</security-policy-match></test>' return query def panorama_security_policy_match(application=None, category=None, destination=None, destination_port=None, from_=None, to_=None, protocol=None, source=None, source_user=None): params = {'type': 'op', 'key': API_KEY, 'cmd': build_policy_match_query(application, category, destination, destination_port, from_, to_, protocol, source, source_user)} result = http_request( URL, 'GET', params=params ) return result['response']['result'] def prettify_matching_rule(matching_rule): pretty_matching_rule = {} if '@name' in matching_rule: pretty_matching_rule['Name'] = matching_rule['@name'] if 'from' in matching_rule: pretty_matching_rule['From'] = matching_rule['from'] if 'source' in matching_rule: pretty_matching_rule['Source'] = matching_rule['source'] if 'to' in matching_rule: pretty_matching_rule['To'] = matching_rule['to'] if 'destination' in matching_rule: pretty_matching_rule['Destination'] = matching_rule['destination'] if 'category' in matching_rule: pretty_matching_rule['Category'] = matching_rule['category'] if 'action' in matching_rule: pretty_matching_rule['Action'] = matching_rule['action'] return pretty_matching_rule def prettify_matching_rules(matching_rules): if not isinstance(matching_rules, list): # handle case of only one log that matched the query return prettify_matching_rule(matching_rules) pretty_matching_rules_arr = [] for matching_rule in matching_rules: pretty_matching_rule = prettify_matching_rule(matching_rule) pretty_matching_rules_arr.append(pretty_matching_rule) return pretty_matching_rules_arr def prettify_query_fields(application=None, category=None, destination=None, destination_port=None, from_=None, to_=None, protocol=None, source=None, source_user=None): pretty_query_fields = {'Source': source, 'Destination': destination, 'Protocol': protocol} if application: pretty_query_fields['Application'] = application if category: pretty_query_fields['Category'] = category if destination_port: pretty_query_fields['DestinationPort'] = destination_port if from_: pretty_query_fields['From'] = from_ if to_: pretty_query_fields['To'] = to_ if source_user: 
pretty_query_fields['SourceUser'] = source_user
    return pretty_query_fields


def panorama_security_policy_match_command():
    if not VSYS:
        raise Exception("The 'panorama-security-policy-match' command is only relevant for a Firewall instance.")

    application = demisto.args().get('application')
    category = demisto.args().get('category')
    destination = demisto.args().get('destination')
    destination_port = demisto.args().get('destination-port')
    from_ = demisto.args().get('from')
    to_ = demisto.args().get('to')
    protocol = demisto.args().get('protocol')
    source = demisto.args().get('source')
    source_user = demisto.args().get('source-user')

    matching_rules = panorama_security_policy_match(application, category, destination, destination_port,
                                                    from_, to_, protocol, source, source_user)
    if not matching_rules:
        demisto.results('The query did not match a Security policy.')
    else:
        ec_ = {'Rules': prettify_matching_rules(matching_rules['rules']['entry']),
               'QueryFields': prettify_query_fields(application, category, destination, destination_port,
                                                    from_, to_, protocol, source, source_user),
               'Query': build_policy_match_query(application, category, destination, destination_port,
                                                 from_, to_, protocol, source, source_user)}
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': matching_rules,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Matching Security Policies:', ec_['Rules'],
                                             ['Name', 'Action', 'From', 'To', 'Source', 'Destination', 'Application'],
                                             removeNull=True),
            'EntryContext': {"Panorama.SecurityPolicyMatch(val.Query == obj.Query)": ec_}
        })


''' Static Routes'''


def prettify_static_route(static_route: Dict, virtual_router: str, template: Optional[str] = None) -> Dict[str, str]:
    pretty_static_route: Dict = {}
    if '@name' in static_route:
        pretty_static_route['Name'] = static_route['@name']
    if 'bfd' in static_route and 'profile' in static_route['bfd']:
        pretty_static_route['BFDprofile'] = static_route['bfd']['profile']
    if 'destination' in static_route:
        if '@dirtyId' in static_route['destination']:
            pretty_static_route['Uncommitted'] = True
        else:
            pretty_static_route['Destination'] = static_route['destination']
    if 'metric' in static_route:
        pretty_static_route['Metric'] = int(static_route['metric'])
    if 'nexthop' in static_route:
        if '@dirtyId' in static_route['nexthop']:
            pretty_static_route['Uncommitted'] = True
        else:
            nexthop: Dict[str, str] = static_route['nexthop']
            if 'ip-address' in nexthop:
                pretty_static_route['NextHop'] = nexthop['ip-address']
            elif 'next-vr' in static_route['nexthop']:
                pretty_static_route['NextHop'] = nexthop['next-vr']
            elif 'fqdn' in static_route['nexthop']:
                pretty_static_route['NextHop'] = nexthop['fqdn']
            elif 'discard' in static_route['nexthop']:
                pretty_static_route['NextHop'] = nexthop['discard']
    if 'route-table' in static_route:
        route_table = static_route['route-table']
        if 'unicast' in route_table:
            pretty_static_route['RouteTable'] = 'Unicast'
        elif 'multicast' in route_table:
            pretty_static_route['RouteTable'] = 'Multicast'
        elif 'both' in route_table:
            pretty_static_route['RouteTable'] = 'Both'
        else:  # the route table is no-install
            pretty_static_route['RouteTable'] = 'No install'
    pretty_static_route['VirtualRouter'] = virtual_router
    if template:
        pretty_static_route['Template'] = template

    return pretty_static_route
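
# Illustrative (hypothetical) output of prettify_static_route for a committed route:
#   prettify_static_route({'@name': 'default', 'destination': '0.0.0.0/0',
#                          'nexthop': {'ip-address': '192.168.1.1'}, 'metric': '10'}, 'vr1')
#   -> {'Name': 'default', 'Destination': '0.0.0.0/0', 'NextHop': '192.168.1.1',
#       'Metric': 10, 'VirtualRouter': 'vr1'}
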
def prettify_static_routes(static_routes, virtual_router: str, template: Optional[str] = None):
    if not isinstance(static_routes, list):  # handle the case of a single static route in the virtual router
        return prettify_static_route(static_routes, virtual_router, template)

    pretty_static_route_arr = []
    for static_route in static_routes:
        pretty_static_route = prettify_static_route(static_route, virtual_router, template)
        pretty_static_route_arr.append(pretty_static_route)

    return pretty_static_route_arr


@logger
def panorama_list_static_routes(xpath_network: str, virtual_router: str, show_uncommitted: str) -> Dict[str, str]:
    action = 'get' if show_uncommitted else 'show'
    params = {
        'action': action,
        'type': 'config',
        'xpath': f'{xpath_network}/virtual-router/entry[@name=\'{virtual_router}\']/routing-table/ip/static-route',
        'key': API_KEY
    }
    result = http_request(URL, 'GET', params=params)
    return result['response']['result']


def panorama_list_static_routes_command():
    """
    List all static routes of a virtual router
    """
    template = demisto.args().get('template')
    xpath_network, template = set_xpath_network(template)
    virtual_router = demisto.args()['virtual_router']
    show_uncommitted = demisto.args().get('show_uncommitted') == 'true'
    virtual_router_object = panorama_list_static_routes(xpath_network, virtual_router, show_uncommitted)

    if 'static-route' not in virtual_router_object or 'entry' not in virtual_router_object['static-route']:
        human_readable = 'The Virtual Router does not exist or has no static routes configured.'
        static_routes = virtual_router_object
    else:
        static_routes = prettify_static_routes(virtual_router_object['static-route']['entry'],
                                               virtual_router, template)
        table_header = f'Displaying all Static Routes for the Virtual Router: {virtual_router}'
        headers = ['Name', 'Destination', 'NextHop', 'Uncommitted', 'RouteTable', 'Metric', 'BFDprofile']
        human_readable = tableToMarkdown(name=table_header, t=static_routes, headers=headers, removeNull=True)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': virtual_router_object,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': human_readable,
        'EntryContext': {"Panorama.StaticRoutes(val.Name == obj.Name)": static_routes}
    })


@logger
def panorama_get_static_route(xpath_network: str, virtual_router: str, static_route_name: str) -> Dict[str, str]:
    params = {
        'action': 'get',
        'type': 'config',
        'xpath': f'{xpath_network}/virtual-router/entry[@name=\'{virtual_router}\']/routing-table/ip/'
                 f'static-route/entry[@name=\'{static_route_name}\']',
        'key': API_KEY
    }
    result = http_request(URL, 'GET', params=params)
    return result['response']['result']


def panorama_get_static_route_command():
    """
    Get a static route of a virtual router
    """
    template = demisto.args().get('template')
    xpath_network, template = set_xpath_network(template)
    virtual_router = demisto.args()['virtual_router']
    static_route_name = demisto.args()['static_route']
    static_route_object = panorama_get_static_route(xpath_network, virtual_router, static_route_name)
    if '@count' in static_route_object and int(static_route_object['@count']) < 1:
        raise Exception('Static route does not exist.')
    static_route = prettify_static_route(static_route_object['entry'], virtual_router, template)
    table_header = f'Static route: {static_route_name}'

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': static_route_object,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(name=table_header, t=static_route, removeNull=True),
        'EntryContext': {
            "Panorama.StaticRoutes(val.Name == obj.Name)": static_route
        }
    })
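
# For orientation, the add-static-route call below builds an XML element like the following
# (hypothetical values):
#   <destination>10.0.0.0/24</destination><nexthop><ip-address>192.168.1.1</ip-address></nexthop>
# optionally followed by <interface>...</interface> and <metric>...</metric>.
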
@logger
def panorama_add_static_route(xpath_network: str, virtual_router: str, static_route_name: str, destination: str,
                              nexthop_type: str, nexthop_value: str, interface: str = None,
                              metric: str = None) -> Dict[str, str]:
    params = {
        'action': 'set',
        'type': 'config',
        'key': API_KEY,
        'xpath': f'{xpath_network}/virtual-router/entry[@name=\'{virtual_router}\']/'
                 f'routing-table/ip/static-route/entry[@name=\'{static_route_name}\']',
        'element': f'<destination>{destination}</destination>'
                   f'<nexthop><{nexthop_type}>{nexthop_value}</{nexthop_type}></nexthop>'
    }
    if interface:
        params['element'] += f'<interface>{interface}</interface>'
    if metric:
        params['element'] += f'<metric>{metric}</metric>'

    result = http_request(URL, 'GET', params=params)
    return result['response']


def panorama_add_static_route_command():
    """
    Add a Static Route
    """
    template = demisto.args().get('template')
    xpath_network, template = set_xpath_network(template)
    virtual_router = demisto.args().get('virtual_router')
    static_route_name = demisto.args().get('static_route')
    destination = demisto.args().get('destination')
    nexthop_type = demisto.args().get('nexthop_type')
    nexthop_value = demisto.args().get('nexthop_value')
    interface = demisto.args().get('interface', None)
    metric = demisto.args().get('metric', None)

    if nexthop_type == 'fqdn':
        # creating a static route based on an FQDN nexthop is only available from PAN-OS 9.x.
        major_version = get_pan_os_major_version()
        if major_version <= 8:
            raise Exception('Next Hop of type FQDN is only available for PAN-OS 9.x instances.')
    static_route = panorama_add_static_route(xpath_network, virtual_router, static_route_name, destination,
                                             nexthop_type, nexthop_value, interface, metric)
    human_readable = f'New uncommitted static route {static_route_name} configuration added.'
    entry_context = {
        'Name': static_route_name,
        'VirtualRouter': virtual_router,
        'Destination': destination,
        'NextHop': nexthop_value,
    }
    if interface:
        entry_context['Interface'] = interface
    if metric:
        entry_context['Metric'] = metric
    if template:
        entry_context['Template'] = template

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': static_route,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': human_readable,
        'EntryContext': {"Panorama.StaticRoutes(val.Name == obj.Name)": entry_context}
    })


@logger
def panorama_delete_static_route(xpath_network: str, virtual_router: str, route_name: str) -> Dict[str, str]:
    params = {
        'action': 'delete',
        'type': 'config',
        'xpath': f'{xpath_network}/virtual-router/entry[@name=\'{virtual_router}\']/'
                 f'routing-table/ip/static-route/entry[@name=\'{route_name}\']',
        'key': API_KEY
    }
    result = http_request(URL, 'DELETE', params=params)
    return result
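
# Note: like the other config-type calls in this integration, the delete above only changes the
# candidate configuration; it takes effect only after a commit runs (see the panorama-commit
# command), which is why the command below reports that changes are not committed.
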
def panorama_delete_static_route_command():
    """
    Delete a Static Route
    """
    template = demisto.args().get('template')
    xpath_network, template = set_xpath_network(template)
    virtual_router = demisto.args()['virtual_router']
    route_name = demisto.args()['route_name']
    deleted_static_route = panorama_delete_static_route(xpath_network, virtual_router, route_name)
    entry_context = {
        'Name': route_name,
        'Deleted': True
    }
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': deleted_static_route,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': f'The static route: {route_name} was deleted. Changes are not committed.',
        'EntryContext': {"Panorama.StaticRoutes(val.Name == obj.Name)": entry_context}
    })


def panorama_show_device_version(target: str = None):
    params = {
        'type': 'op',
        'cmd': '<show><system><info/></system></show>',
        'key': API_KEY
    }
    if target:
        params['target'] = target
    result = http_request(
        URL,
        'GET',
        params=params
    )
    return result['response']['result']['system']


def panorama_show_device_version_command():
    """
    Get device details and show the message in the war room
    """
    target = str(demisto.args()['target']) if 'target' in demisto.args() else None
    response = panorama_show_device_version(target)

    info_data = {
        'Devicename': response['devicename'],
        'Model': response['model'],
        'Serial': response['serial'],
        'Version': response['sw-version']
    }
    entry_context = {"Panorama.Device.Info(val.Devicename === obj.Devicename)": info_data}
    headers = ['Devicename', 'Model', 'Serial', 'Version']
    human_readable = tableToMarkdown('Device Version:', info_data, headers=headers, removeNull=True)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': response,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': human_readable,
        'EntryContext': entry_context
    })


@logger
def panorama_download_latest_content_update_content(target: str):
    params = {
        'type': 'op',
        'target': target,
        'cmd': '<request><content><upgrade><download><latest/></download></upgrade></content></request>',
        'key': API_KEY
    }
    result = http_request(
        URL,
        'POST',
        body=params
    )
    return result


def panorama_download_latest_content_update_command():
    """
    Download the latest content update and show the message in the war room
    """
    if DEVICE_GROUP:
        raise Exception('Download latest content is only supported on Firewall (not Panorama).')
    target = str(demisto.args()['target']) if 'target' in demisto.args() else None
    result = panorama_download_latest_content_update_content(target)

    if 'result' in result['response']:
        # the download has been given a job id
        download_status_output = {
            'JobID': result['response']['result']['job'],
            'Status': 'Pending'
        }
        entry_context = {"Panorama.Content.Download(val.JobID == obj.JobID)": download_status_output}
        human_readable = tableToMarkdown('Content download:', download_status_output, ['JobID', 'Status'],
                                         removeNull=True)
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': result,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': human_readable,
            'EntryContext': entry_context
        })
    else:
        # no download took place
        demisto.results(result['response']['msg'])
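
# Typical war-room flow (hypothetical job id) for the content update commands defined around here:
#   !panorama-download-latest-content-update              -> returns a JobID with Status: Pending
#   !panorama-content-update-download-status job_id=123   -> Status: Pending / Completed / Failed
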
@logger
def panorama_content_update_download_status(target: str, job_id: str):
    params = {
        'type': 'op',
        'cmd': f'<show><jobs><id>{job_id}</id></jobs></show>',
        'target': target,
        'key': API_KEY
    }
    result = http_request(
        URL,
        'GET',
        params=params
    )
    return result


def panorama_content_update_download_status_command():
    """
    Check the status of a content update download job
    """
    if DEVICE_GROUP:
        raise Exception('Content download status is only supported on Firewall (not Panorama).')
    target = str(demisto.args()['target']) if 'target' in demisto.args() else None
    job_id = demisto.args()['job_id']
    result = panorama_content_update_download_status(target, job_id)

    content_download_status = {
        'JobID': result['response']['result']['job']['id']
    }
    if result['response']['result']['job']['status'] == 'FIN':
        if result['response']['result']['job']['result'] == 'OK':
            content_download_status['Status'] = 'Completed'
        else:
            content_download_status['Status'] = 'Failed'
        content_download_status['Details'] = result['response']['result']['job']

    if result['response']['result']['job']['status'] == 'PEND':
        content_download_status['Status'] = 'Pending'

    entry_context = {"Panorama.Content.Download(val.JobID == obj.JobID)": content_download_status}
    human_readable = tableToMarkdown('Content download status:', content_download_status,
                                     ['JobID', 'Status', 'Details'], removeNull=True)

    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': human_readable,
        'EntryContext': entry_context
    })


@logger
def panorama_install_latest_content_update(target: str):
    params = {
        'type': 'op',
        'cmd': '<request><content><upgrade><install><version>latest</version></install></upgrade></content></request>',
        'target': target,
        'key': API_KEY
    }
    result = http_request(
        URL,
        'GET',
        params=params
    )
    return result


def panorama_install_latest_content_update_command():
    """
    Install the latest content update
    """
    if DEVICE_GROUP:
        raise Exception('Content update installation is only supported on Firewall (not Panorama).')
    target = str(demisto.args()['target']) if 'target' in demisto.args() else None
    result = panorama_install_latest_content_update(target)

    if 'result' in result['response']:
        # the installation has been given a job id
        content_install_info = {
            'JobID': result['response']['result']['job'],
            'Status': 'Pending'
        }
        entry_context = {"Panorama.Content.Install(val.JobID == obj.JobID)": content_install_info}
        human_readable = tableToMarkdown('Result:', content_install_info, ['JobID', 'Status'], removeNull=True)
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': result,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': human_readable,
            'EntryContext': entry_context
        })
    else:
        # no content install took place
        demisto.results(result['response']['msg'])


@logger
def panorama_content_update_install_status(target: str, job_id: str):
    params = {
        'type': 'op',
        'cmd': f'<show><jobs><id>{job_id}</id></jobs></show>',
        'target': target,
        'key': API_KEY
    }
    result = http_request(
        URL,
        'GET',
        params=params
    )
    return result


def panorama_content_update_install_status_command():
    """
    Check the status of a content update install job
    """
    if DEVICE_GROUP:
        raise Exception('Content install status is only supported on Firewall (not Panorama).')
    target = str(demisto.args()['target']) if 'target' in demisto.args() else None
    job_id = demisto.args()['job_id']
    result = panorama_content_update_install_status(target, job_id)

    content_install_status = {
        'JobID': result['response']['result']['job']['id']
    }
    if result['response']['result']['job']['status'] == 'FIN':
        if result['response']['result']['job']['result'] == 'OK':
            content_install_status['Status'] = 'Completed'
        else:  # result['response']['result']['job']['result'] == 'FAIL'
            content_install_status['Status'] = 'Failed'
        content_install_status['Details'] = result['response']['result']['job']

    if result['response']['result']['job']['status'] == 'PEND':
        content_install_status['Status'] = 'Pending'

    entry_context = {"Panorama.Content.Install(val.JobID == obj.JobID)": content_install_status}
    human_readable = tableToMarkdown('Content install status:', content_install_status,
                                     ['JobID', 'Status', 'Details'], removeNull=True)
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': human_readable,
        'EntryContext': entry_context
    })


def panorama_check_latest_panos_software_command():
    if
DEVICE_GROUP: raise Exception('Checking latest PAN-OS version is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None params = { 'type': 'op', 'cmd': '<request><system><software><check></check></software></system></request>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) demisto.results(result['response']['result']) @logger def panorama_download_panos_version(target: str, target_version: str): params = { 'type': 'op', 'cmd': f'<request><system><software><download><version>{target_version}' f'</version></download></software></system></request>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def panorama_download_panos_version_command(): """ Check jobID of pan-os version download """ if DEVICE_GROUP: raise Exception('Downloading PAN-OS version is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None target_version = str(demisto.args()['target_version']) result = panorama_download_panos_version(target, target_version) if 'result' in result['response']: # download has been given a jobid panos_version_download = { 'JobID': result['response']['result']['job'] } entry_context = {"Panorama.PANOS.Download(val.JobID == obj.JobID)": panos_version_download} human_readable = tableToMarkdown('Result:', panos_version_download, ['JobID', 'Status'], removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': entry_context }) else: # no panos download took place demisto.results(result['response']['msg']) @logger def panorama_download_panos_status(target: str, job_id: str): params = { 'type': 'op', 'cmd': f'<show><jobs><id>{job_id}</id></jobs></show>', 'target': target, 'key': API_KEY } result = http_request( URL, 'GET', params=params ) return result def panorama_download_panos_status_command(): """ Check jobID of panos download status """ if DEVICE_GROUP: raise Exception('PAN-OS version download status is only supported on Firewall (not Panorama).') target = str(demisto.args()['target']) if 'target' in demisto.args() else None job_id = demisto.args()['job_id'] result = panorama_download_panos_status(target, job_id) panos_download_status = { 'JobID': result['response']['result']['job']['id'] } if result['response']['result']['job']['status'] == 'FIN': if result['response']['result']['job']['result'] == 'OK': panos_download_status['Status'] = 'Completed' else: # result['response']['job']['result'] == 'FAIL' panos_download_status['Status'] = 'Failed' panos_download_status['Details'] = result['response']['result']['job'] if result['response']['result']['job']['status'] == 'PEND': panos_download_status['Status'] = 'Pending' human_readable = tableToMarkdown('PAN-OS download status:', panos_download_status, ['JobID', 'Status', 'Details'], removeNull=True) entry_context = {"Panorama.PANOS.Download(val.JobID == obj.JobID)": panos_download_status} demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': result, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': entry_context }) @logger def panorama_install_panos_version(target: str, target_version: str): params = { 'type': 'op', 'cmd': f'<request><system><software><install><version>{target_version}' 
'</version></install></software></system></request>',
        'target': target,
        'key': API_KEY
    }
    result = http_request(
        URL,
        'GET',
        params=params
    )
    return result


def panorama_install_panos_version_command():
    """
    Install a target PAN-OS version
    """
    if DEVICE_GROUP:
        raise Exception('PAN-OS installation is only supported on Firewall (not Panorama).')
    target = str(demisto.args()['target']) if 'target' in demisto.args() else None
    target_version = str(demisto.args()['target_version'])
    result = panorama_install_panos_version(target, target_version)

    if 'result' in result['response']:
        # the PAN-OS installation has been given a job id
        panos_install = {
            'JobID': result['response']['result']['job']
        }
        entry_context = {"Panorama.PANOS.Install(val.JobID == obj.JobID)": panos_install}
        human_readable = tableToMarkdown('PAN-OS Installation:', panos_install, ['JobID', 'Status'], removeNull=True)
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': result,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': human_readable,
            'EntryContext': entry_context
        })
    else:
        # no PAN-OS install took place
        demisto.results(result['response']['msg'])


@logger
def panorama_install_panos_status(target: str, job_id: str):
    params = {
        'type': 'op',
        'cmd': f'<show><jobs><id>{job_id}</id></jobs></show>',
        'target': target,
        'key': API_KEY
    }
    result = http_request(
        URL,
        'GET',
        params=params
    )
    return result


def panorama_install_panos_status_command():
    """
    Check the status of a PAN-OS installation job
    """
    if DEVICE_GROUP:
        raise Exception('PAN-OS installation status is only supported on Firewall (not Panorama).')
    target = str(demisto.args()['target']) if 'target' in demisto.args() else None
    job_id = demisto.args()['job_id']
    result = panorama_install_panos_status(target, job_id)

    panos_install_status = {
        'JobID': result['response']['result']['job']['id']
    }
    if result['response']['result']['job']['status'] == 'FIN':
        if result['response']['result']['job']['result'] == 'OK':
            panos_install_status['Status'] = 'Completed'
        else:  # result['response']['result']['job']['result'] == 'FAIL'
            panos_install_status['Status'] = 'Failed'
        panos_install_status['Details'] = result['response']['result']['job']

    if result['response']['result']['job']['status'] == 'PEND':
        panos_install_status['Status'] = 'Pending'

    entry_context = {"Panorama.PANOS.Install(val.JobID == obj.JobID)": panos_install_status}
    human_readable = tableToMarkdown('PAN-OS installation status:', panos_install_status,
                                     ['JobID', 'Status', 'Details'], removeNull=True)
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': result,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': human_readable,
        'EntryContext': entry_context
    })


def panorama_device_reboot_command():
    if DEVICE_GROUP:
        raise Exception('Device reboot is only supported on Firewall (not Panorama).')
    target = str(demisto.args()['target']) if 'target' in demisto.args() else None
    params = {
        'type': 'op',
        'cmd': '<request><restart><system></system></restart></request>',
        'target': target,
        'key': API_KEY
    }
    result = http_request(
        URL,
        'GET',
        params=params
    )
    demisto.results(result['response']['result'])


def main():
    LOG(f'Command being called is: {demisto.command()}')

    try:
        # Remove proxy if not set to true in params
        handle_proxy()

        if demisto.command() == 'test-module':
            panorama_test()

        elif demisto.command() == 'panorama':
            panorama_command()

        elif demisto.command() == 'panorama-commit':
            panorama_commit_command()

        elif demisto.command() == 'panorama-commit-status':
panorama_commit_status_command() elif demisto.command() == 'panorama-push-to-device-group': panorama_push_to_device_group_command() elif demisto.command() == 'panorama-push-status': panorama_push_status_command() # Addresses commands elif demisto.command() == 'panorama-list-addresses': panorama_list_addresses_command() elif demisto.command() == 'panorama-get-address': panorama_get_address_command() elif demisto.command() == 'panorama-create-address': panorama_create_address_command() elif demisto.command() == 'panorama-delete-address': panorama_delete_address_command() # Address groups commands elif demisto.command() == 'panorama-list-address-groups': panorama_list_address_groups_command() elif demisto.command() == 'panorama-get-address-group': panorama_get_address_group_command() elif demisto.command() == 'panorama-create-address-group': panorama_create_address_group_command() elif demisto.command() == 'panorama-delete-address-group': panorama_delete_address_group_command() elif demisto.command() == 'panorama-edit-address-group': panorama_edit_address_group_command() # Services commands elif demisto.command() == 'panorama-list-services': panorama_list_services_command() elif demisto.command() == 'panorama-get-service': panorama_get_service_command() elif demisto.command() == 'panorama-create-service': panorama_create_service_command() elif demisto.command() == 'panorama-delete-service': panorama_delete_service_command() # Service groups commands elif demisto.command() == 'panorama-list-service-groups': panorama_list_service_groups_command() elif demisto.command() == 'panorama-get-service-group': panorama_get_service_group_command() elif demisto.command() == 'panorama-create-service-group': panorama_create_service_group_command() elif demisto.command() == 'panorama-delete-service-group': panorama_delete_service_group_command() elif demisto.command() == 'panorama-edit-service-group': panorama_edit_service_group_command() # Custom URL Category commands elif demisto.command() == 'panorama-get-custom-url-category': panorama_get_custom_url_category_command() elif demisto.command() == 'panorama-create-custom-url-category': panorama_create_custom_url_category_command() elif demisto.command() == 'panorama-delete-custom-url-category': panorama_delete_custom_url_category_command() elif demisto.command() == 'panorama-edit-custom-url-category': panorama_edit_custom_url_category_command() # URL Filtering capabilities elif demisto.command() == 'panorama-get-url-category': panorama_get_url_category_command(url_cmd='url') elif demisto.command() == 'panorama-get-url-category-from-cloud': panorama_get_url_category_command(url_cmd='url-info-cloud') elif demisto.command() == 'panorama-get-url-category-from-host': panorama_get_url_category_command(url_cmd='url-info-host') # URL Filter elif demisto.command() == 'panorama-get-url-filter': panorama_get_url_filter_command() elif demisto.command() == 'panorama-create-url-filter': panorama_create_url_filter_command() elif demisto.command() == 'panorama-edit-url-filter': panorama_edit_url_filter_command() elif demisto.command() == 'panorama-delete-url-filter': panorama_delete_url_filter_command() # EDL elif demisto.command() == 'panorama-list-edls': panorama_list_edls_command() elif demisto.command() == 'panorama-get-edl': panorama_get_edl_command() elif demisto.command() == 'panorama-create-edl': panorama_create_edl_command() elif demisto.command() == 'panorama-edit-edl': panorama_edit_edl_command() elif demisto.command() == 'panorama-delete-edl': 
panorama_delete_edl_command() elif demisto.command() == 'panorama-refresh-edl': panorama_refresh_edl_command() # Registered IPs elif demisto.command() == 'panorama-register-ip-tag': panorama_register_ip_tag_command() elif demisto.command() == 'panorama-unregister-ip-tag': panorama_unregister_ip_tag_command() # Registered Users elif demisto.command() == 'panorama-register-user-tag': panorama_register_user_tag_command() elif demisto.command() == 'panorama-unregister-user-tag': panorama_unregister_user_tag_command() # Security Rules Managing elif demisto.command() == 'panorama-list-rules': panorama_list_rules_command() elif demisto.command() == 'panorama-move-rule': panorama_move_rule_command() # Security Rules Configuration elif demisto.command() == 'panorama-create-rule': panorama_create_rule_command() elif demisto.command() == 'panorama-custom-block-rule': panorama_custom_block_rule_command() elif demisto.command() == 'panorama-edit-rule': panorama_edit_rule_command() elif demisto.command() == 'panorama-delete-rule': panorama_delete_rule_command() # Traffic Logs - deprecated elif demisto.command() == 'panorama-query-traffic-logs': panorama_query_traffic_logs_command() elif demisto.command() == 'panorama-check-traffic-logs-status': panorama_check_traffic_logs_status_command() elif demisto.command() == 'panorama-get-traffic-logs': panorama_get_traffic_logs_command() # Logs elif demisto.command() == 'panorama-query-logs': panorama_query_logs_command() elif demisto.command() == 'panorama-check-logs-status': panorama_check_logs_status_command() elif demisto.command() == 'panorama-get-logs': panorama_get_logs_command() # Pcaps elif demisto.command() == 'panorama-list-pcaps': panorama_list_pcaps_command() elif demisto.command() == 'panorama-get-pcap': panorama_get_pcap_command() # Application elif demisto.command() == 'panorama-list-applications': panorama_list_applications_command() # Test security policy match elif demisto.command() == 'panorama-security-policy-match': panorama_security_policy_match_command() # Static Routes elif demisto.command() == 'panorama-list-static-routes': panorama_list_static_routes_command() elif demisto.command() == 'panorama-get-static-route': panorama_get_static_route_command() elif demisto.command() == 'panorama-add-static-route': panorama_add_static_route_command() elif demisto.command() == 'panorama-delete-static-route': panorama_delete_static_route_command() # Firewall Upgrade # Check device software version elif demisto.command() == 'panorama-show-device-version': panorama_show_device_version_command() # Download the latest content update elif demisto.command() == 'panorama-download-latest-content-update': panorama_download_latest_content_update_command() # Content update download status elif demisto.command() == 'panorama-content-update-download-status': panorama_content_update_download_status_command() # Install the latest content update elif demisto.command() == 'panorama-install-latest-content-update': panorama_install_latest_content_update_command() # Content update install status elif demisto.command() == 'panorama-content-update-install-status': panorama_content_update_install_status_command() # Check PAN-OS latest software update elif demisto.command() == 'panorama-check-latest-panos-software': panorama_check_latest_panos_software_command() # Download target PAN-OS version elif demisto.command() == 'panorama-download-panos-version': panorama_download_panos_version_command() # PAN-OS download status elif demisto.command() == 
'panorama-download-panos-status': panorama_download_panos_status_command() # PAN-OS software install elif demisto.command() == 'panorama-install-panos-version': panorama_install_panos_version_command() # PAN-OS install status elif demisto.command() == 'panorama-install-panos-status': panorama_install_panos_status_command() # Reboot Firewall Device elif demisto.command() == 'panorama-device-reboot': panorama_device_reboot_command() else: raise NotImplementedError(f'Command {demisto.command()} was not implemented.') except Exception as err: return_error(str(err)) finally: LOG.print_log() if __name__ in ["__builtin__", "builtins", "__main__"]: main()
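# --- Illustrative upgrade flow (sketch only, not part of the integration) ---
# The PAN-OS upgrade commands above are meant to be chained from a playbook or
# the CLI; the serial number and version below are placeholders:
#
#   !panorama-download-panos-version target=<serial> target_version=10.1.0
#       -> returns Panorama.PANOS.Download.JobID in context
#   !panorama-download-panos-status target=<serial> job_id=<JobID>
#       -> poll until Status == 'Completed' (job 'status' == 'FIN' and
#          'result' == 'OK'); 'PEND' is reported as 'Pending'
#   !panorama-install-panos-version / !panorama-install-panos-status
#       -> same job-ID-then-poll pattern as the download pair
#   !panorama-device-reboot target=<serial>
#       -> reboots the firewall once the installation job completes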
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Matti Hämäläinen <msh@nmr.mgh.harvard.edu> # Denis Engemann <denis.engemann@gmail.com> # Andrew Dykstra <andrew.r.dykstra@gmail.com> # Teon Brooks <teon.brooks@gmail.com> # Daniel McCloy <dan.mccloy@gmail.com> # # License: BSD (3-clause) import os import os.path as op import sys from collections import OrderedDict from copy import deepcopy from functools import partial import numpy as np from scipy import sparse from ..defaults import HEAD_SIZE_DEFAULT, _handle_default from ..utils import (verbose, logger, warn, _check_preload, _validate_type, fill_doc, _check_option) from ..io.compensator import get_current_comp from ..io.constants import FIFF from ..io.meas_info import anonymize_info, Info, MontageMixin, create_info from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type, _check_excludes_includes, _contains_ch_type, channel_indices_by_type, pick_channels, _picks_to_idx, _get_channel_types) from ..io.write import DATE_NONE def _get_meg_system(info): """Educated guess for the helmet type based on channels.""" have_helmet = True for ch in info['chs']: if ch['kind'] == FIFF.FIFFV_MEG_CH: # Only take first 16 bits, as higher bits store CTF grad comp order coil_type = ch['coil_type'] & 0xFFFF nmag = np.sum( [c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']]) if coil_type == FIFF.FIFFV_COIL_NM_122: system = '122m' break elif coil_type // 1000 == 3: # All Vectorview coils are 30xx system = '306m' break elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD): system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh' break elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD: system = 'CTF_275' break elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD: system = 'KIT' # Our helmet does not match very well, so let's just create it have_helmet = False break elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD: system = 'BabySQUID' break elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD: system = 'ARTEMIS123' have_helmet = False break else: system = 'unknown' have_helmet = False return system, have_helmet def _get_ch_type(inst, ch_type, allow_ref_meg=False): """Choose a single channel type (usually for plotting). Usually used in plotting to plot a single datatype, e.g. look for mags, then grads, then ... to plot. """ if ch_type is None: allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd', 'fnirs_cw_amplitude', 'fnirs_od', 'hbo', 'hbr', 'ecog', 'seeg'] allowed_types += ['ref_meg'] if allow_ref_meg else [] for type_ in allowed_types: if isinstance(inst, Info): if _contains_ch_type(inst, type_): ch_type = type_ break elif type_ in inst: ch_type = type_ break else: raise RuntimeError('No plottable channel types found') return ch_type @verbose def equalize_channels(instances, copy=True, verbose=None): """Equalize channel picks and ordering across multiple MNE-Python objects. First, all channels that are not common to each object are dropped. Then, using the first object in the list as a template, the channels of each object are re-ordered to match the template. The end result is that all given objects define the same channels, in the same order. Parameters ---------- instances : list A list of MNE-Python objects to equalize the channels for. Objects can be of type Raw, Epochs, Evoked, AverageTFR, Forward, Covariance, CrossSpectralDensity or Info. copy : bool When dropping and/or re-ordering channels, an object will be copied when this parameter is set to ``True``. 
When set to ``False``, the dropping and re-ordering of channels happens in-place. .. versionadded:: 0.20.0 %(verbose)s Returns ------- equalized_instances : list A list of MNE-Python objects that have the same channels defined in the same order. Notes ----- This function operates in-place only when ``copy=False``. """ from ..cov import Covariance from ..io.base import BaseRaw from ..io.meas_info import Info from ..epochs import BaseEpochs from ..evoked import Evoked from ..forward import Forward from ..time_frequency import _BaseTFR, CrossSpectralDensity # Instances need to have a `ch_names` attribute and a `pick_channels` # method that supports `ordered=True`. allowed_types = (BaseRaw, BaseEpochs, Evoked, _BaseTFR, Forward, Covariance, CrossSpectralDensity, Info) allowed_types_str = ("Raw, Epochs, Evoked, TFR, Forward, Covariance, " "CrossSpectralDensity or Info") for inst in instances: _validate_type(inst, allowed_types, "Instances to be modified", allowed_types_str) chan_template = instances[0].ch_names logger.info('Identifying common channels ...') channels = [set(inst.ch_names) for inst in instances] common_channels = set(chan_template).intersection(*channels) all_channels = set(chan_template).union(*channels) dropped = list(set(all_channels - common_channels)) # Preserve the order of chan_template order = np.argsort([chan_template.index(ch) for ch in common_channels]) common_channels = np.array(list(common_channels))[order].tolist() # Update all instances to match the common_channels list reordered = False equalized_instances = [] for inst in instances: # Only perform picking when needed if inst.ch_names != common_channels: if copy: inst = inst.copy() inst.pick_channels(common_channels, ordered=True) if len(inst.ch_names) == len(common_channels): reordered = True equalized_instances.append(inst) if dropped: logger.info('Dropped the following channels:\n%s' % dropped) elif reordered: logger.info('Channels have been re-ordered.') return equalized_instances class ContainsMixin(object): """Mixin class for Raw, Evoked, Epochs.""" def __contains__(self, ch_type): """Check channel type membership. Parameters ---------- ch_type : str Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc. Returns ------- in : bool Whether or not the instance contains the given channel type. Examples -------- Channel type membership can be tested as:: >>> 'meg' in inst # doctest: +SKIP True >>> 'seeg' in inst # doctest: +SKIP False """ if ch_type == 'meg': has_ch_type = (_contains_ch_type(self.info, 'mag') or _contains_ch_type(self.info, 'grad')) else: has_ch_type = _contains_ch_type(self.info, ch_type) return has_ch_type @property def compensation_grade(self): """The current gradient compensation grade.""" return get_current_comp(self.info) @fill_doc def get_channel_types(self, picks=None, unique=False, only_data_chs=False): """Get a list of channel types, one for each channel. Parameters ---------- %(picks_all)s unique : bool Whether to return only unique channel types. Default is ``False``. only_data_chs : bool Whether to ignore non-data channels. Default is ``False``. Returns ------- channel_types : list The channel types. 
""" return _get_channel_types(self.info, picks=picks, unique=unique, only_data_chs=only_data_chs) # XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py _human2fiff = {'ecg': FIFF.FIFFV_ECG_CH, 'eeg': FIFF.FIFFV_EEG_CH, 'emg': FIFF.FIFFV_EMG_CH, 'eog': FIFF.FIFFV_EOG_CH, 'exci': FIFF.FIFFV_EXCI_CH, 'ias': FIFF.FIFFV_IAS_CH, 'misc': FIFF.FIFFV_MISC_CH, 'resp': FIFF.FIFFV_RESP_CH, 'seeg': FIFF.FIFFV_SEEG_CH, 'stim': FIFF.FIFFV_STIM_CH, 'syst': FIFF.FIFFV_SYST_CH, 'bio': FIFF.FIFFV_BIO_CH, 'ecog': FIFF.FIFFV_ECOG_CH, 'fnirs_cw_amplitude': FIFF.FIFFV_FNIRS_CH, 'fnirs_od': FIFF.FIFFV_FNIRS_CH, 'hbo': FIFF.FIFFV_FNIRS_CH, 'hbr': FIFF.FIFFV_FNIRS_CH} _human2unit = {'ecg': FIFF.FIFF_UNIT_V, 'eeg': FIFF.FIFF_UNIT_V, 'emg': FIFF.FIFF_UNIT_V, 'eog': FIFF.FIFF_UNIT_V, 'exci': FIFF.FIFF_UNIT_NONE, 'ias': FIFF.FIFF_UNIT_NONE, 'misc': FIFF.FIFF_UNIT_V, 'resp': FIFF.FIFF_UNIT_NONE, 'seeg': FIFF.FIFF_UNIT_V, 'stim': FIFF.FIFF_UNIT_NONE, 'syst': FIFF.FIFF_UNIT_NONE, 'bio': FIFF.FIFF_UNIT_V, 'ecog': FIFF.FIFF_UNIT_V, 'fnirs_cw_amplitude': FIFF.FIFF_UNIT_V, 'fnirs_od': FIFF.FIFF_UNIT_NONE, 'hbo': FIFF.FIFF_UNIT_MOL, 'hbr': FIFF.FIFF_UNIT_MOL} _unit2human = {FIFF.FIFF_UNIT_V: 'V', FIFF.FIFF_UNIT_T: 'T', FIFF.FIFF_UNIT_T_M: 'T/m', FIFF.FIFF_UNIT_MOL: 'M', FIFF.FIFF_UNIT_NONE: 'NA', FIFF.FIFF_UNIT_CEL: 'C'} def _check_set(ch, projs, ch_type): """Ensure type change is compatible with projectors.""" new_kind = _human2fiff[ch_type] if ch['kind'] != new_kind: for proj in projs: if ch['ch_name'] in proj['data']['col_names']: raise RuntimeError('Cannot change channel type for channel %s ' 'in projector "%s"' % (ch['ch_name'], proj['desc'])) ch['kind'] = new_kind class SetChannelsMixin(MontageMixin): """Mixin class for Raw, Evoked, Epochs.""" @verbose def set_eeg_reference(self, ref_channels='average', projection=False, ch_type='auto', forward=None, verbose=None): """Specify which reference to use for EEG data. Use this function to explicitly specify the desired reference for EEG. This can be either an existing electrode or a new virtual channel. This function will re-reference the data according to the desired reference. Parameters ---------- %(set_eeg_reference_ref_channels)s %(set_eeg_reference_projection)s %(set_eeg_reference_ch_type)s %(set_eeg_reference_forward)s %(verbose_meth)s Returns ------- inst : instance of Raw | Epochs | Evoked Data with EEG channels re-referenced. If ``ref_channels='average'`` and ``projection=True`` a projection will be added instead of directly re-referencing the data. %(set_eeg_reference_see_also_notes)s """ from ..io.reference import set_eeg_reference return set_eeg_reference(self, ref_channels=ref_channels, copy=False, projection=projection, ch_type=ch_type, forward=forward)[0] def _get_channel_positions(self, picks=None): """Get channel locations from info. Parameters ---------- picks : str | list | slice | None None gets good data indices. Notes ----- .. versionadded:: 0.9.0 """ picks = _picks_to_idx(self.info, picks) chs = self.info['chs'] pos = np.array([chs[k]['loc'][:3] for k in picks]) n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0) if n_zero > 1: # XXX some systems have origin (0, 0, 0) raise ValueError('Could not extract channel positions for ' '{} channels'.format(n_zero)) return pos def _set_channel_positions(self, pos, names): """Update channel locations in info. Parameters ---------- pos : array-like | np.ndarray, shape (n_points, 3) The channel positions to be set. names : list of str The names of the channels to be set. Notes ----- .. 
versionadded:: 0.9.0 """ if len(pos) != len(names): raise ValueError('Number of channel positions not equal to ' 'the number of names given.') pos = np.asarray(pos, dtype=np.float64) if pos.shape[-1] != 3 or pos.ndim != 2: msg = ('Channel positions must have the shape (n_points, 3) ' 'not %s.' % (pos.shape,)) raise ValueError(msg) for name, p in zip(names, pos): if name in self.ch_names: idx = self.ch_names.index(name) self.info['chs'][idx]['loc'][:3] = p else: msg = ('%s was not found in the info. Cannot be updated.' % name) raise ValueError(msg) @verbose def set_channel_types(self, mapping, verbose=None): """Define the sensor type of channels. Parameters ---------- mapping : dict A dictionary mapping a channel to a sensor type (str), e.g., ``{'EEG061': 'eog'}``. %(verbose_meth)s Returns ------- inst : instance of Raw | Epochs | Evoked The instance (modified in place). .. versionchanged:: 0.20 Return the instance. Notes ----- The following sensor types are accepted: ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog, hbo, hbr, fnirs_cw_amplitude, fnirs_od .. versionadded:: 0.9.0 """ ch_names = self.info['ch_names'] # first check and assemble clean mappings of index and name unit_changes = dict() for ch_name, ch_type in mapping.items(): if ch_name not in ch_names: raise ValueError("This channel name (%s) doesn't exist in " "info." % ch_name) c_ind = ch_names.index(ch_name) if ch_type not in _human2fiff: raise ValueError('This function cannot change to this ' 'channel type: %s. Accepted channel types ' 'are %s.' % (ch_type, ", ".join(sorted(_human2unit.keys())))) # Set sensor type _check_set(self.info['chs'][c_ind], self.info['projs'], ch_type) unit_old = self.info['chs'][c_ind]['unit'] unit_new = _human2unit[ch_type] if unit_old not in _unit2human: raise ValueError("Channel '%s' has unknown unit (%s). Please " "fix the measurement info of your data." % (ch_name, unit_old)) if unit_old != _human2unit[ch_type]: this_change = (_unit2human[unit_old], _unit2human[unit_new]) if this_change not in unit_changes: unit_changes[this_change] = list() unit_changes[this_change].append(ch_name) self.info['chs'][c_ind]['unit'] = _human2unit[ch_type] if ch_type in ['eeg', 'seeg', 'ecog']: coil_type = FIFF.FIFFV_COIL_EEG elif ch_type == 'hbo': coil_type = FIFF.FIFFV_COIL_FNIRS_HBO elif ch_type == 'hbr': coil_type = FIFF.FIFFV_COIL_FNIRS_HBR elif ch_type == 'fnirs_cw_amplitude': coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE elif ch_type == 'fnirs_od': coil_type = FIFF.FIFFV_COIL_FNIRS_OD else: coil_type = FIFF.FIFFV_COIL_NONE self.info['chs'][c_ind]['coil_type'] = coil_type msg = "The unit for channel(s) {0} has changed from {1} to {2}." for this_change, names in unit_changes.items(): warn(msg.format(", ".join(sorted(names)), *this_change)) return self @fill_doc def rename_channels(self, mapping): """Rename channels. Parameters ---------- %(rename_channels_mapping)s Returns ------- inst : instance of Raw | Epochs | Evoked The instance (modified in place). .. versionchanged:: 0.20 Return the instance. Notes ----- .. versionadded:: 0.9.0 """ rename_channels(self.info, mapping) return self @verbose def plot_sensors(self, kind='topomap', ch_type=None, title=None, show_names=False, ch_groups=None, to_sphere=True, axes=None, block=False, show=True, sphere=None, verbose=None): """Plot sensor positions. Parameters ---------- kind : str Whether to plot the sensors as 3d, topomap or as an interactive sensor selection dialog. Available options 'topomap', '3d', 'select'. 
If 'select', a set of channels can be selected interactively by using lasso selector or clicking while holding control key. The selected channels are returned along with the figure instance. Defaults to 'topomap'. ch_type : None | str The channel type to plot. Available options 'mag', 'grad', 'eeg', 'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad, eeg, seeg and ecog channels are plotted. If None (default), then channels are chosen in the order given above. title : str | None Title for the figure. If None (default), equals ``'Sensor positions (%%s)' %% ch_type``. show_names : bool | array of str Whether to display all channel names. If an array, only the channel names in the array are shown. Defaults to False. ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None Channel groups for coloring the sensors. If None (default), default coloring scheme is used. If 'position', the sensors are divided into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If array, the channels are divided by picks given in the array. .. versionadded:: 0.13.0 to_sphere : bool Whether to project the 3d locations to a sphere. When False, the sensor array appears as if viewed from directly above the subject's head. Has no effect when kind='3d'. Defaults to True. .. versionadded:: 0.14.0 axes : instance of Axes | instance of Axes3D | None Axes to draw the sensors to. If ``kind='3d'``, axes must be an instance of Axes3D. If None (default), a new axes will be created. .. versionadded:: 0.13.0 block : bool Whether to halt program execution until the figure is closed. Defaults to False. .. versionadded:: 0.13.0 show : bool Show figure if True. Defaults to True. %(topomap_sphere_auto)s %(verbose_meth)s Returns ------- fig : instance of Figure Figure containing the sensor topography. selection : list A list of selected channels. Only returned if ``kind=='select'``. See Also -------- mne.viz.plot_layout Notes ----- This function plots the sensor locations from the info structure using matplotlib. For drawing the sensors using mayavi see :func:`mne.viz.plot_alignment`. .. versionadded:: 0.12.0 """ from ..viz.utils import plot_sensors return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title, show_names=show_names, ch_groups=ch_groups, to_sphere=to_sphere, axes=axes, block=block, show=show, sphere=sphere, verbose=verbose) @verbose def anonymize(self, daysback=None, keep_his=False, verbose=None): """Anonymize measurement information in place. Parameters ---------- %(anonymize_info_parameters)s %(verbose)s Returns ------- inst : instance of Raw | Epochs | Evoked The modified instance. Notes ----- %(anonymize_info_notes)s .. versionadded:: 0.13.0 """ anonymize_info(self.info, daysback=daysback, keep_his=keep_his, verbose=verbose) self.set_meas_date(self.info['meas_date']) # unify annot update return self def set_meas_date(self, meas_date): """Set the measurement start date. Parameters ---------- meas_date : datetime | float | tuple | None The new measurement date. If datetime object, it must be timezone-aware and in UTC. A tuple of (seconds, microseconds) or float (alias for ``(meas_date, 0)``) can also be passed and a datetime object will be automatically created. If None, will remove the time reference. Returns ------- inst : instance of Raw | Epochs | Evoked The modified raw instance. Operates in place. 
See Also -------- mne.io.Raw.anonymize Notes ----- If you want to remove all time references in the file, call :func:`mne.io.anonymize_info(inst.info) <mne.io.anonymize_info>` after calling ``inst.set_meas_date(None)``. .. versionadded:: 0.20 """ from ..annotations import _handle_meas_date meas_date = _handle_meas_date(meas_date) self.info['meas_date'] = meas_date # clear file_id and meas_id if needed if meas_date is None: for key in ('file_id', 'meas_id'): value = self.info.get(key) if value is not None: assert 'msecs' not in value value['secs'] = DATE_NONE[0] value['usecs'] = DATE_NONE[1] # The following copy is needed for a test CTF dataset # otherwise value['machid'][:] = 0 would suffice _tmp = value['machid'].copy() _tmp[:] = 0 value['machid'] = _tmp if hasattr(self, 'annotations'): self.annotations._orig_time = meas_date return self class UpdateChannelsMixin(object): """Mixin class for Raw, Evoked, Epochs, AverageTFR.""" @verbose def pick_types(self, meg=None, eeg=False, stim=False, eog=False, ecg=False, emg=False, ref_meg='auto', misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, seeg=False, dipole=False, gof=False, bio=False, ecog=False, fnirs=False, csd=False, include=(), exclude='bads', selection=None, verbose=None): """Pick some channels by type and names. Parameters ---------- meg : bool | str If True include MEG channels. If string it can be 'mag', 'grad', 'planar1' or 'planar2' to select only magnetometers, all gradiometers, or a specific type of gradiometer. eeg : bool If True include EEG channels. stim : bool If True include stimulus channels. eog : bool If True include EOG channels. ecg : bool If True include ECG channels. emg : bool If True include EMG channels. ref_meg : bool | str If True include CTF / 4D reference channels. If 'auto', reference channels are included if compensations are present and ``meg`` is not False. Can also be the string options for the ``meg`` parameter. misc : bool If True include miscellaneous analog channels. resp : bool If True include response-trigger channel. For some MEG systems this is separate from the stim channel. chpi : bool If True include continuous HPI coil channels. exci : bool Flux excitation channel used to be a stimulus channel. ias : bool Internal Active Shielding data (maybe on Triux only). syst : bool System status channel information (on Triux systems only). seeg : bool Stereotactic EEG channels. dipole : bool Dipole time course channels. gof : bool Dipole goodness of fit channels. bio : bool Bio channels. ecog : bool Electrocorticography channels. fnirs : bool | str Functional near-infrared spectroscopy channels. If True include all fNIRS channels. If False (default) include none. If string it can be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to include channels measuring deoxyhemoglobin). csd : bool EEG-CSD channels. include : list of str List of additional channels to include. If empty do not include any. exclude : list of str | str List of channels to exclude. If 'bads' (default), exclude channels in ``info['bads']``. selection : list of str Restrict sensor channels (MEG, EEG) to this list of channel names. %(verbose_meth)s Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. See Also -------- pick_channels Notes ----- .. 
versionadded:: 0.9.0 """ idx = pick_types( self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg, ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci, ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio, ecog=ecog, fnirs=fnirs, csd=csd, include=include, exclude=exclude, selection=selection) self._pick_drop_channels(idx) # remove dropped channel types from reject and flat if getattr(self, 'reject', None) is not None: # use list(self.reject) to avoid RuntimeError for changing # dictionary size during iteration for ch_type in list(self.reject): if ch_type not in self: del self.reject[ch_type] if getattr(self, 'flat', None) is not None: for ch_type in list(self.flat): if ch_type not in self: del self.flat[ch_type] return self def pick_channels(self, ch_names, ordered=False): """Pick some channels. Parameters ---------- ch_names : list The list of channels to select. ordered : bool If True (default False), ensure that the order of the channels in the modified instance matches the order of ``ch_names``. .. versionadded:: 0.20.0 Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. See Also -------- drop_channels pick_types reorder_channels Notes ----- The channel names given are assumed to be a set, i.e. the order does not matter. The original order of the channels is preserved. You can use ``reorder_channels`` to set channel order if necessary. .. versionadded:: 0.9.0 """ picks = pick_channels(self.info['ch_names'], ch_names, ordered=ordered) return self._pick_drop_channels(picks) @fill_doc def pick(self, picks, exclude=()): """Pick a subset of channels. Parameters ---------- %(picks_all)s exclude : list | str Set of channels to exclude, only used when picking based on types (e.g., exclude="bads" when picks="meg"). Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. """ picks = _picks_to_idx(self.info, picks, 'all', exclude, allow_empty=False) return self._pick_drop_channels(picks) def reorder_channels(self, ch_names): """Reorder channels. Parameters ---------- ch_names : list The desired channel order. Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. See Also -------- drop_channels pick_types pick_channels Notes ----- Channel names must be unique. Channels that are not in ``ch_names`` are dropped. .. versionadded:: 0.16.0 """ _check_excludes_includes(ch_names) idx = list() for ch_name in ch_names: ii = self.ch_names.index(ch_name) if ii in idx: raise ValueError('Channel name repeated: %s' % (ch_name,)) idx.append(ii) return self._pick_drop_channels(idx) def drop_channels(self, ch_names): """Drop channel(s). Parameters ---------- ch_names : iterable or str Iterable (e.g. list) of channel name(s) or channel name to remove. Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. See Also -------- reorder_channels pick_channels pick_types Notes ----- .. versionadded:: 0.9.0 """ if isinstance(ch_names, str): ch_names = [ch_names] try: all_str = all([isinstance(ch, str) for ch in ch_names]) except TypeError: raise ValueError("'ch_names' must be iterable, got " "type {} ({}).".format(type(ch_names), ch_names)) if not all_str: raise ValueError("Each element in 'ch_names' must be str, got " "{}.".format([type(ch) for ch in ch_names])) missing = [ch for ch in ch_names if ch not in self.ch_names] if len(missing) > 0: msg = "Channel(s) {0} not found, nothing dropped." 
raise ValueError(msg.format(", ".join(missing))) bad_idx = [self.ch_names.index(ch) for ch in ch_names if ch in self.ch_names] idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx) return self._pick_drop_channels(idx) def _pick_drop_channels(self, idx): # avoid circular imports from ..io import BaseRaw from ..time_frequency import AverageTFR, EpochsTFR if not isinstance(self, BaseRaw): _check_preload(self, 'adding, dropping, or reordering channels') if getattr(self, 'picks', None) is not None: self.picks = self.picks[idx] if getattr(self, '_read_picks', None) is not None: self._read_picks = [r[idx] for r in self._read_picks] if hasattr(self, '_cals'): self._cals = self._cals[idx] pick_info(self.info, idx, copy=False) if getattr(self, '_projector', None) is not None: self._projector = self._projector[idx][:, idx] # All others (Evoked, Epochs, Raw) have chs axis=-2 axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2 if hasattr(self, '_data'): # skip non-preloaded Raw self._data = self._data.take(idx, axis=axis) else: assert isinstance(self, BaseRaw) and not self.preload self._pick_projs() return self def _pick_projs(self): """Keep only projectors which apply to at least 1 data channel.""" drop_idx = [] for idx, proj in enumerate(self.info['projs']): if not set(self.info['ch_names']) & set(proj['data']['col_names']): drop_idx.append(idx) for idx in drop_idx: logger.info(f"Removing projector {self.info['projs'][idx]}") if drop_idx and hasattr(self, 'del_proj'): self.del_proj(drop_idx) return self def add_channels(self, add_list, force_update_info=False): """Append new channels to the instance. Parameters ---------- add_list : list A list of objects to append to self. Must all be of the same type as the current object. force_update_info : bool If True, force the info for objects to be appended to match the values in ``self``. This should generally only be used when adding stim channels for which important metadata won't be overwritten. .. versionadded:: 0.12 Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. See Also -------- drop_channels Notes ----- If ``self`` is a Raw instance that has been preloaded into a :obj:`numpy.memmap` instance, the memmap will be resized. 
""" # avoid circular imports from ..io import BaseRaw, _merge_info from ..epochs import BaseEpochs _validate_type(add_list, (list, tuple), 'Input') # Object-specific checks for inst in add_list + [self]: _check_preload(inst, "adding channels") if isinstance(self, BaseRaw): con_axis = 0 comp_class = BaseRaw elif isinstance(self, BaseEpochs): con_axis = 1 comp_class = BaseEpochs else: con_axis = 0 comp_class = type(self) for inst in add_list: _validate_type(inst, comp_class, 'All input') data = [inst._data for inst in [self] + add_list] # Make sure that all dimensions other than channel axis are the same compare_axes = [i for i in range(data[0].ndim) if i != con_axis] shapes = np.array([dat.shape for dat in data])[:, compare_axes] for shape in shapes: if not ((shapes[0] - shape) == 0).all(): raise AssertionError('All data dimensions except channels ' 'must match, got %s != %s' % (shapes[0], shape)) del shapes # Create final data / info objects infos = [self.info] + [inst.info for inst in add_list] new_info = _merge_info(infos, force_update_to_first=force_update_info) # Now update the attributes if isinstance(self._data, np.memmap) and con_axis == 0 and \ sys.platform != 'darwin': # resizing not available--no mremap # Use a resize and fill in other ones out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:] n_bytes = np.prod(out_shape) * self._data.dtype.itemsize self._data.flush() self._data.base.resize(n_bytes) self._data = np.memmap(self._data.filename, mode='r+', dtype=self._data.dtype, shape=out_shape) assert self._data.shape == out_shape assert self._data.nbytes == n_bytes offset = len(data[0]) for d in data[1:]: this_len = len(d) self._data[offset:offset + this_len] = d offset += this_len else: self._data = np.concatenate(data, axis=con_axis) self.info = new_info if isinstance(self, BaseRaw): self._cals = np.concatenate([getattr(inst, '_cals') for inst in [self] + add_list]) # We should never use these since data are preloaded, let's just # set it to something large and likely to break (2 ** 31 - 1) extra_idx = [2147483647] * sum(info['nchan'] for info in infos[1:]) assert all(len(r) == infos[0]['nchan'] for r in self._read_picks) self._read_picks = [ np.concatenate([r, extra_idx]) for r in self._read_picks] assert all(len(r) == self.info['nchan'] for r in self._read_picks) return self class InterpolationMixin(object): """Mixin class for Raw, Evoked, Epochs.""" @verbose def interpolate_bads(self, reset_bads=True, mode='accurate', origin='auto', method=None, verbose=None): """Interpolate bad MEG and EEG channels. Operates in place. Parameters ---------- reset_bads : bool If True, remove the bads from info. mode : str Either ``'accurate'`` or ``'fast'``, determines the quality of the Legendre polynomial expansion used for interpolation of channels using the minimum-norm method. origin : array-like, shape (3,) | str Origin of the sphere in the head coordinate frame and in meters. Can be ``'auto'`` (default), which means a head-digitization-based origin fit. .. versionadded:: 0.17 method : dict Method to use for each channel type. Currently only the key "eeg" has multiple options: - ``"spline"`` (default) Use spherical spline interpolation. - ``"MNE"`` Use minimum-norm projection to a sphere and back. This is the method used for MEG channels. The value for "meg" is "MNE", and the value for "fnirs" is "nearest". The default (None) is thus an alias for:: method=dict(meg="MNE", eeg="spline", fnirs="nearest") .. 
versionadded:: 0.21 %(verbose_meth)s Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. Notes ----- .. versionadded:: 0.9.0 """ from ..bem import _check_origin from .interpolation import _interpolate_bads_eeg,\ _interpolate_bads_meeg, _interpolate_bads_nirs _check_preload(self, "interpolation") method = _handle_default('interpolation_method', method) for key in method: _check_option('method[key]', key, ('meg', 'eeg', 'fnirs')) _check_option("method['eeg']", method['eeg'], ('spline', 'MNE')) _check_option("method['meg']", method['meg'], ('MNE',)) _check_option("method['fnirs']", method['fnirs'], ('nearest',)) if len(self.info['bads']) == 0: warn('No bad channels to interpolate. Doing nothing...') return self logger.info('Interpolating bad channels') origin = _check_origin(origin, self.info) if method['eeg'] == 'spline': _interpolate_bads_eeg(self, origin=origin) eeg_mne = False else: eeg_mne = True _interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne) _interpolate_bads_nirs(self) if reset_bads is True: self.info['bads'] = [] return self @fill_doc def rename_channels(info, mapping): """Rename channels. .. warning:: The channel names must have at most 15 characters Parameters ---------- info : dict Measurement info to modify. %(rename_channels_mapping)s """ _validate_type(info, Info, 'info') info._check_consistency() bads = list(info['bads']) # make our own local copies ch_names = list(info['ch_names']) # first check and assemble clean mappings of index and name if isinstance(mapping, dict): orig_names = sorted(list(mapping.keys())) missing = [orig_name not in ch_names for orig_name in orig_names] if any(missing): raise ValueError("Channel name(s) in mapping missing from info: " "%s" % np.array(orig_names)[np.array(missing)]) new_names = [(ch_names.index(ch_name), new_name) for ch_name, new_name in mapping.items()] elif callable(mapping): new_names = [(ci, mapping(ch_name)) for ci, ch_name in enumerate(ch_names)] else: raise ValueError('mapping must be callable or dict, not %s' % (type(mapping),)) # check we got all strings out of the mapping for new_name in new_names: _validate_type(new_name[1], 'str', 'New channel mappings') bad_new_names = [name for _, name in new_names if len(name) > 15] if len(bad_new_names): raise ValueError('Channel names cannot be longer than 15 ' 'characters. These channel names are not ' 'valid: %s' % bad_new_names) # do the remapping locally for c_ind, new_name in new_names: for bi, bad in enumerate(bads): if bad == ch_names[c_ind]: bads[bi] = new_name ch_names[c_ind] = new_name # check that all the channel names are unique if len(ch_names) != len(np.unique(ch_names)): raise ValueError('New channel names are not unique, renaming failed') # do the remapping in info info['bads'] = bads for ch, ch_name in zip(info['chs'], ch_names): ch['ch_name'] = ch_name info._update_redundant() info._check_consistency() def _recursive_flatten(cell, dtype): """Unpack mat files in Python.""" if len(cell) > 0: while not isinstance(cell[0], dtype): cell = [c for d in cell for c in d] return cell @fill_doc def read_ch_adjacency(fname, picks=None): """Parse FieldTrip neighbors .mat file. More information on these neighbor definitions can be found on the related `FieldTrip documentation pages <http://www.fieldtriptoolbox.org/template/neighbours/>`__. Parameters ---------- fname : str The file name. Example: 'neuromag306mag', 'neuromag306planar', 'ctf275', 'biosemi64', etc. %(picks_all)s Picks must match the template. 
Returns ------- ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels) The adjacency matrix. ch_names : list The list of channel names present in adjacency matrix. See Also -------- find_ch_adjacency Notes ----- This function is closely related to :func:`find_ch_adjacency`. If you don't know the correct file for the neighbor definitions, :func:`find_ch_adjacency` can compute the adjacency matrix from 2d sensor locations. """ from scipy.io import loadmat if not op.isabs(fname): templates_dir = op.realpath(op.join(op.dirname(__file__), 'data', 'neighbors')) templates = os.listdir(templates_dir) for f in templates: if f == fname: break if f == fname + '_neighb.mat': fname += '_neighb.mat' break else: raise ValueError('I do not know about this neighbor ' 'template: "{}"'.format(fname)) fname = op.join(templates_dir, fname) nb = loadmat(fname)['neighbours'] ch_names = _recursive_flatten(nb['label'], str) picks = _picks_to_idx(len(ch_names), picks) neighbors = [_recursive_flatten(c, str) for c in nb['neighblabel'].flatten()] assert len(ch_names) == len(neighbors) adjacency = _ch_neighbor_adjacency(ch_names, neighbors) # picking before constructing matrix is buggy adjacency = adjacency[picks][:, picks] ch_names = [ch_names[p] for p in picks] return adjacency, ch_names def _ch_neighbor_adjacency(ch_names, neighbors): """Compute sensor adjacency matrix. Parameters ---------- ch_names : list of str The channel names. neighbors : list of list A list of list of channel names. The neighbors to which the channels in ch_names are connected with. Must be of the same length as ch_names. Returns ------- ch_adjacency : scipy.sparse matrix The adjacency matrix. """ if len(ch_names) != len(neighbors): raise ValueError('`ch_names` and `neighbors` must ' 'have the same length') set_neighbors = {c for d in neighbors for c in d} rest = set_neighbors - set(ch_names) if len(rest) > 0: raise ValueError('Some of your neighbors are not present in the ' 'list of channel names') for neigh in neighbors: if (not isinstance(neigh, list) and not all(isinstance(c, str) for c in neigh)): raise ValueError('`neighbors` must be a list of lists of str') ch_adjacency = np.eye(len(ch_names), dtype=bool) for ii, neigbs in enumerate(neighbors): ch_adjacency[ii, [ch_names.index(i) for i in neigbs]] = True ch_adjacency = sparse.csr_matrix(ch_adjacency) return ch_adjacency def find_ch_adjacency(info, ch_type): """Find the adjacency matrix for the given channels. This function tries to infer the appropriate adjacency matrix template for the given channels. If a template is not found, the adjacency matrix is computed using Delaunay triangulation based on 2d sensor locations. Parameters ---------- info : instance of Info The measurement info. ch_type : str | None The channel type for computing the adjacency matrix. Currently supports 'mag', 'grad', 'eeg' and None. If None, the info must contain only one channel type. Returns ------- ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels) The adjacency matrix. ch_names : list The list of channel names present in adjacency matrix. See Also -------- read_ch_adjacency Notes ----- .. versionadded:: 0.15 Automatic detection of an appropriate adjacency matrix template only works for MEG data at the moment. This means that the adjacency matrix is always computed for EEG data and never loaded from a template file. If you want to load a template for a given montage use :func:`read_ch_adjacency` directly. 
""" if ch_type is None: picks = channel_indices_by_type(info) if sum([len(p) != 0 for p in picks.values()]) != 1: raise ValueError('info must contain only one channel type if ' 'ch_type is None.') ch_type = channel_type(info, 0) else: _check_option('ch_type', ch_type, ['mag', 'grad', 'eeg']) (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types, has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils, has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info) conn_name = None if has_vv_mag and ch_type == 'mag': conn_name = 'neuromag306mag' elif has_vv_grad and ch_type == 'grad': conn_name = 'neuromag306planar' elif has_neuromag_122_grad: conn_name = 'neuromag122' elif has_4D_mag: if 'MEG 248' in info['ch_names']: idx = info['ch_names'].index('MEG 248') grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG if ch_type == 'grad' and grad: conn_name = 'bti248grad' elif ch_type == 'mag' and mag: conn_name = 'bti248' elif 'MEG 148' in info['ch_names'] and ch_type == 'mag': idx = info['ch_names'].index('MEG 148') if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG: conn_name = 'bti148' elif has_CTF_grad and ch_type == 'mag': if info['nchan'] < 100: conn_name = 'ctf64' elif info['nchan'] > 200: conn_name = 'ctf275' else: conn_name = 'ctf151' elif n_kit_grads > 0: from ..io.kit.constants import KIT_NEIGHBORS conn_name = KIT_NEIGHBORS.get(info['kit_system_id']) if conn_name is not None: logger.info('Reading adjacency matrix for %s.' % conn_name) return read_ch_adjacency(conn_name) logger.info('Could not find a adjacency matrix for the data. ' 'Computing adjacency based on Delaunay triangulations.') return _compute_ch_adjacency(info, ch_type) def _compute_ch_adjacency(info, ch_type): """Compute channel adjacency matrix using Delaunay triangulations. Parameters ---------- info : instance of mne.measuerment_info.Info The measurement info. ch_type : str The channel type for computing the adjacency matrix. Currently supports 'mag', 'grad' and 'eeg'. Returns ------- ch_adjacency : scipy.sparse matrix, shape (n_channels, n_channels) The adjacency matrix. ch_names : list The list of channel names present in adjacency matrix. """ from scipy.spatial import Delaunay from .. import spatial_tris_adjacency from ..channels.layout import _find_topomap_coords, _pair_grad_sensors combine_grads = (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in np.unique([ch['coil_type'] for ch in info['chs']])) picks = dict(_picks_by_type(info, exclude=[]))[ch_type] ch_names = [info['ch_names'][pick] for pick in picks] if combine_grads: pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[]) if len(pairs) != len(picks): raise RuntimeError('Cannot find a pair for some of the ' 'gradiometers. 
Cannot compute adjacency ' 'matrix.') # only for one of the pair xy = _find_topomap_coords(info, picks[::2], sphere=HEAD_SIZE_DEFAULT) else: xy = _find_topomap_coords(info, picks, sphere=HEAD_SIZE_DEFAULT) tri = Delaunay(xy) neighbors = spatial_tris_adjacency(tri.simplices) if combine_grads: ch_adjacency = np.eye(len(picks), dtype=bool) for idx, neigbs in zip(neighbors.row, neighbors.col): for ii in range(2): # make sure each pair is included for jj in range(2): ch_adjacency[idx * 2 + ii, neigbs * 2 + jj] = True ch_adjacency[idx * 2 + ii, idx * 2 + jj] = True # pair ch_adjacency = sparse.csr_matrix(ch_adjacency) else: ch_adjacency = sparse.lil_matrix(neighbors) ch_adjacency.setdiag(np.repeat(1, ch_adjacency.shape[0])) ch_adjacency = ch_adjacency.tocsr() return ch_adjacency, ch_names def fix_mag_coil_types(info, use_cal=False): """Fix magnetometer coil types. Parameters ---------- info : dict The info dict to correct. Corrections are done in-place. use_cal : bool If True, further refine the check for old coil types by checking ``info['chs'][ii]['cal']``. Notes ----- This function changes magnetometer coil types 3022 (T1: SQ20483N) and 3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition records in the info structure. Neuromag Vectorview systems can contain magnetometers with two different coil sizes (3022 and 3023 vs. 3024). The systems incorporating coils of type 3024 were introduced last and are used at the majority of MEG sites. At some sites with 3024 magnetometers, the data files have still defined the magnetometers to be of type 3022 to ensure compatibility with older versions of Neuromag software. In the MNE software as well as in the present version of Neuromag software coil type 3024 is fully supported. Therefore, it is now safe to upgrade the data files to use the true coil type. .. note:: The effect of the difference between the coil sizes on the current estimates computed by the MNE software is very small. Therefore the use of ``fix_mag_coil_types`` is not mandatory. """ old_mag_inds = _get_T1T2_mag_inds(info, use_cal) for ii in old_mag_inds: info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3 logger.info('%d of %d magnetometer types replaced with T3.' % (len(old_mag_inds), len(pick_types(info, meg='mag')))) info._check_consistency() def _get_T1T2_mag_inds(info, use_cal=False): """Find T1/T2 magnetometer coil types.""" picks = pick_types(info, meg='mag') old_mag_inds = [] # From email exchanges, systems with the larger T2 coil only use the cal # value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10 # (Triux). So we can use a simple check for > 3e-11. 
for ii in picks: ch = info['chs'][ii] if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2): if use_cal: if ch['cal'] > 3e-11: old_mag_inds.append(ii) else: old_mag_inds.append(ii) return old_mag_inds def _get_ch_info(info): """Get channel info for inferring acquisition device.""" chs = info['chs'] # Only take first 16 bits, as higher bits store CTF comp order coil_types = {ch['coil_type'] & 0xFFFF for ch in chs} channel_types = {ch['kind'] for ch in chs} has_vv_mag = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2, FIFF.FIFFV_COIL_VV_MAG_T3]) has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFFV_COIL_VV_PLANAR_T2, FIFF.FIFFV_COIL_VV_PLANAR_T3]) has_neuromag_122_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_NM_122]) is_old_vv = ' ' in chs[0]['ch_name'] has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG, FIFF.FIFFV_COIL_CTF_REF_GRAD, FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD) has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or (FIFF.FIFFV_MEG_CH in channel_types and any(k in ctf_other_types for k in coil_types))) # hack due to MNE-C bug in IO of CTF # only take first 16 bits, as higher bits store CTF comp order n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD for ch in chs) has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad, n_kit_grads]) has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and FIFF.FIFFV_EEG_CH in channel_types) has_eeg_coils_and_meg = has_eeg_coils and has_any_meg has_eeg_coils_only = has_eeg_coils and not has_any_meg has_csd_coils = (FIFF.FIFFV_COIL_EEG_CSD in coil_types and FIFF.FIFFV_EEG_CH in channel_types) return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types, has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils, has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad, has_csd_coils) def make_1020_channel_selections(info, midline="z"): """Return dict mapping from ROI names to lists of picks for 10/20 setups. This passes through all channel names, and uses a simple heuristic to separate channel names into three Region of Interest-based selections: Left, Midline and Right. The heuristic is that channels ending in any of the characters in ``midline`` are filed under that heading, otherwise those ending in odd numbers under "Left", those in even numbers under "Right". Other channels are ignored. This is appropriate for 10/20 files, but not for other channel naming conventions. If an info object is provided, lists are sorted from posterior to anterior. Parameters ---------- info : instance of Info Where to obtain the channel names from. The picks will be in relation to the position in ``info["ch_names"]``. If possible, these lists will be sorted by the y position of the channel locations, i.e., from back to front. midline : str Names ending in any of these characters are stored under the ``Midline`` key. Defaults to 'z'. Note that capitalization is ignored. Returns ------- selections : dict A dictionary mapping from ROI names to lists of picks (integers). 
""" _validate_type(info, "info") try: from .layout import find_layout layout = find_layout(info) pos = layout.pos ch_names = layout.names except RuntimeError: # no channel positions found ch_names = info["ch_names"] pos = None selections = dict(Left=[], Midline=[], Right=[]) for pick, channel in enumerate(ch_names): last_char = channel[-1].lower() # in 10/20, last char codes hemisphere if last_char in midline: selection = "Midline" elif last_char.isdigit(): selection = "Left" if int(last_char) % 2 else "Right" else: # ignore the channel continue selections[selection].append(pick) if pos is not None: # sort channels from front to center # (y-coordinate of the position info in the layout) selections = {selection: np.array(picks)[pos[picks, 1].argsort()] for selection, picks in selections.items()} return selections def combine_channels(inst, groups, method='mean', keep_stim=False, drop_bad=False): """Combine channels based on specified channel grouping. Parameters ---------- inst : instance of Raw, Epochs, or Evoked An MNE-Python object to combine the channels for. The object can be of type Raw, Epochs, or Evoked. groups : dict Specifies which channels are aggregated into a single channel, with aggregation method determined by the ``method`` parameter. One new pseudo-channel is made per dict entry; the dict values must be lists of picks (integer indices of ``ch_names``). For example:: groups=dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8]) Note that within a dict entry all channels must have the same type. method : str | callable Which method to use to combine channels. If a :class:`str`, must be one of 'mean', 'median', or 'std' (standard deviation). If callable, the callable must accept one positional input (data of shape ``(n_channels, n_times)``, or ``(n_epochs, n_channels, n_times)``) and return an :class:`array <numpy.ndarray>` of shape ``(n_times,)``, or ``(n_epochs, n_times)``. For example with an instance of Raw or Evoked:: method = lambda data: np.mean(data, axis=0) Another example with an instance of Epochs:: method = lambda data: np.median(data, axis=1) Defaults to ``'mean'``. keep_stim : bool If ``True``, include stimulus channels in the resulting object. Defaults to ``False``. drop_bad : bool If ``True``, drop channels marked as bad before combining. Defaults to ``False``. Returns ------- combined_inst : instance of Raw, Epochs, or Evoked An MNE-Python object of the same type as the input ``inst``, containing one virtual channel for each group in ``groups`` (and, if ``keep_stim`` is ``True``, also containing stimulus channels). """ from ..io import BaseRaw, RawArray from .. 
import BaseEpochs, EpochsArray, Evoked, EvokedArray ch_axis = 1 if isinstance(inst, BaseEpochs) else 0 ch_idx = list(range(inst.info['nchan'])) ch_names = inst.info['ch_names'] ch_types = inst.get_channel_types() inst_data = inst.data if isinstance(inst, Evoked) else inst.get_data() groups = OrderedDict(deepcopy(groups)) # Convert string values of ``method`` into callables # XXX Possibly de-duplicate with _make_combine_callable of mne/viz/utils.py if isinstance(method, str): method_dict = {key: partial(getattr(np, key), axis=ch_axis) for key in ('mean', 'median', 'std')} try: method = method_dict[method] except KeyError: raise ValueError('"method" must be a callable, or one of "mean", ' f'"median", or "std"; got "{method}".') # Instantiate channel info and data new_ch_names, new_ch_types, new_data = [], [], [] if not isinstance(keep_stim, bool): raise TypeError('"keep_stim" must be of type bool, not ' f'{type(keep_stim)}.') if keep_stim: stim_ch_idx = list(pick_types(inst.info, meg=False, stim=True)) if stim_ch_idx: new_ch_names = [ch_names[idx] for idx in stim_ch_idx] new_ch_types = [ch_types[idx] for idx in stim_ch_idx] new_data = [np.take(inst_data, idx, axis=ch_axis) for idx in stim_ch_idx] else: warn('Could not find stimulus channels.') # Get indices of bad channels ch_idx_bad = [] if not isinstance(drop_bad, bool): raise TypeError('"drop_bad" must be of type bool, not ' f'{type(drop_bad)}.') if drop_bad and inst.info['bads']: ch_idx_bad = pick_channels(ch_names, inst.info['bads']) # Check correctness of combinations for this_group, this_picks in groups.items(): # Check if channel indices are out of bounds if not all(idx in ch_idx for idx in this_picks): raise ValueError('Some channel indices are out of bounds.') # Check for heterogeneous sensor type combinations this_ch_type = np.array(ch_types)[this_picks] if len(set(this_ch_type)) > 1: types = ', '.join(set(this_ch_type)) raise ValueError('Cannot combine sensors of different types; ' f'"{this_group}" contains types {types}.') # Remove bad channels these_bads = [idx for idx in this_picks if idx in ch_idx_bad] this_picks = [idx for idx in this_picks if idx not in ch_idx_bad] if these_bads: logger.info('Dropped the following channels in group ' f'{this_group}: {these_bads}') # Warn when combining fewer than 2 channels if len(set(this_picks)) < 2: warn(f'Fewer than 2 channels in group "{this_group}" when ' f'combining by method "{method}".') # If all good, create a more detailed dict without bad channels groups[this_group] = dict(picks=this_picks, ch_type=this_ch_type[0]) # Combine channels and add them to the new instance for this_group, this_group_dict in groups.items(): new_ch_names.append(this_group) new_ch_types.append(this_group_dict['ch_type']) this_picks = this_group_dict['picks'] this_data = np.take(inst_data, this_picks, axis=ch_axis) new_data.append(method(this_data)) new_data = np.swapaxes(new_data, 0, ch_axis) info = create_info(sfreq=inst.info['sfreq'], ch_names=new_ch_names, ch_types=new_ch_types) if isinstance(inst, BaseRaw): combined_inst = RawArray(new_data, info, first_samp=inst.first_samp, verbose=inst.verbose) elif isinstance(inst, BaseEpochs): combined_inst = EpochsArray(new_data, info, events=inst.events, tmin=inst.times[0], verbose=inst.verbose) elif isinstance(inst, Evoked): combined_inst = EvokedArray(new_data, info, tmin=inst.times[0], verbose=inst.verbose) return combined_inst
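# --- Usage sketch for combine_channels (illustrative only; the file name and
# channel indices below are hypothetical, and assume a preloaded Raw with at
# least 8 channels of the same type):
#
#   import mne
#   raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)
#   groups = dict(Left=[0, 1, 2, 3], Right=[4, 5, 6, 7])  # picks, not names
#   roi_raw = combine_channels(raw, groups, method='mean', drop_bad=True)
#
# The result contains two virtual channels, 'Left' and 'Right', each holding
# the mean of its group; per the checks above, a group may not mix sensor
# types, and a group left with fewer than 2 channels only triggers a warning.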
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Matti Hämäläinen <msh@nmr.mgh.harvard.edu> # Denis Engemann <denis.engemann@gmail.com> # Andrew Dykstra <andrew.r.dykstra@gmail.com> # Teon Brooks <teon.brooks@gmail.com> # Daniel McCloy <dan.mccloy@gmail.com> # # License: BSD (3-clause) import os import os.path as op import sys from collections import OrderedDict from copy import deepcopy from functools import partial import numpy as np from scipy import sparse from ..defaults import HEAD_SIZE_DEFAULT, _handle_default from ..utils import (verbose, logger, warn, _check_preload, _validate_type, fill_doc, _check_option) from ..io.compensator import get_current_comp from ..io.constants import FIFF from ..io.meas_info import anonymize_info, Info, MontageMixin, create_info from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type, _check_excludes_includes, _contains_ch_type, channel_indices_by_type, pick_channels, _picks_to_idx, _get_channel_types) from ..io.write import DATE_NONE def _get_meg_system(info): """Educated guess for the helmet type based on channels.""" have_helmet = True for ch in info['chs']: if ch['kind'] == FIFF.FIFFV_MEG_CH: # Only take first 16 bits, as higher bits store CTF grad comp order coil_type = ch['coil_type'] & 0xFFFF nmag = np.sum( [c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']]) if coil_type == FIFF.FIFFV_COIL_NM_122: system = '122m' break elif coil_type // 1000 == 3: # All Vectorview coils are 30xx system = '306m' break elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD): system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh' break elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD: system = 'CTF_275' break elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD: system = 'KIT' # Our helmet does not match very well, so let's just create it have_helmet = False break elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD: system = 'BabySQUID' break elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD: system = 'ARTEMIS123' have_helmet = False break else: system = 'unknown' have_helmet = False return system, have_helmet def _get_ch_type(inst, ch_type, allow_ref_meg=False): """Choose a single channel type (usually for plotting). Usually used in plotting to plot a single datatype, e.g. look for mags, then grads, then ... to plot. """ if ch_type is None: allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd', 'fnirs_cw_amplitude', 'fnirs_od', 'hbo', 'hbr', 'ecog', 'seeg'] allowed_types += ['ref_meg'] if allow_ref_meg else [] for type_ in allowed_types: if isinstance(inst, Info): if _contains_ch_type(inst, type_): ch_type = type_ break elif type_ in inst: ch_type = type_ break else: raise RuntimeError('No plottable channel types found') return ch_type @verbose def equalize_channels(instances, copy=True, verbose=None): """Equalize channel picks and ordering across multiple MNE-Python objects. First, all channels that are not common to each object are dropped. Then, using the first object in the list as a template, the channels of each object are re-ordered to match the template. The end result is that all given objects define the same channels, in the same order. Parameters ---------- instances : list A list of MNE-Python objects to equalize the channels for. Objects can be of type Raw, Epochs, Evoked, AverageTFR, Forward, Covariance, CrossSpectralDensity or Info. copy : bool When dropping and/or re-ordering channels, an object will be copied when this parameter is set to ``True``. 
When set to ``False``, the dropping and re-ordering of channels happens in-place. .. versionadded:: 0.20.0 %(verbose)s Returns ------- equalized_instances : list A list of MNE-Python objects that have the same channels defined in the same order. Notes ----- This function operates in place when ``copy=False``. """ from ..cov import Covariance from ..io.base import BaseRaw from ..io.meas_info import Info from ..epochs import BaseEpochs from ..evoked import Evoked from ..forward import Forward from ..time_frequency import _BaseTFR, CrossSpectralDensity # Instances need to have a `ch_names` attribute and a `pick_channels` # method that supports `ordered=True`. allowed_types = (BaseRaw, BaseEpochs, Evoked, _BaseTFR, Forward, Covariance, CrossSpectralDensity, Info) allowed_types_str = ("Raw, Epochs, Evoked, TFR, Forward, Covariance, " "CrossSpectralDensity or Info") for inst in instances: _validate_type(inst, allowed_types, "Instances to be modified", allowed_types_str) chan_template = instances[0].ch_names logger.info('Identifying common channels ...') channels = [set(inst.ch_names) for inst in instances] common_channels = set(chan_template).intersection(*channels) all_channels = set(chan_template).union(*channels) dropped = list(set(all_channels - common_channels)) # Preserve the order of chan_template order = np.argsort([chan_template.index(ch) for ch in common_channels]) common_channels = np.array(list(common_channels))[order].tolist() # Update all instances to match the common_channels list reordered = False equalized_instances = [] for inst in instances: # Only perform picking when needed if inst.ch_names != common_channels: if copy: inst = inst.copy() inst.pick_channels(common_channels, ordered=True) if len(inst.ch_names) == len(common_channels): reordered = True equalized_instances.append(inst) if dropped: logger.info('Dropped the following channels:\n%s' % dropped) elif reordered: logger.info('Channels have been re-ordered.') return equalized_instances class ContainsMixin(object): """Mixin class for Raw, Evoked, Epochs.""" def __contains__(self, ch_type): """Check channel type membership. Parameters ---------- ch_type : str Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc. Returns ------- in : bool Whether or not the instance contains the given channel type. Examples -------- Channel type membership can be tested as:: >>> 'meg' in inst # doctest: +SKIP True >>> 'seeg' in inst # doctest: +SKIP False """ if ch_type == 'meg': has_ch_type = (_contains_ch_type(self.info, 'mag') or _contains_ch_type(self.info, 'grad')) else: has_ch_type = _contains_ch_type(self.info, ch_type) return has_ch_type @property def compensation_grade(self): """The current gradient compensation grade.""" return get_current_comp(self.info) @fill_doc def get_channel_types(self, picks=None, unique=False, only_data_chs=False): """Get a list of channel types, one per channel. Parameters ---------- %(picks_all)s unique : bool Whether to return only unique channel types. Default is ``False``. only_data_chs : bool Whether to ignore non-data channels. Default is ``False``. Returns ------- channel_types : list The channel types.
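Examples
--------
A minimal sketch, assuming ``inst`` is a Raw, Epochs, or Evoked object
containing both MEG and EEG channels (hypothetical output)::

    >>> inst.get_channel_types(unique=True)  # doctest: +SKIP
    ['mag', 'grad', 'eeg']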
""" return _get_channel_types(self.info, picks=picks, unique=unique, only_data_chs=only_data_chs) # XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py _human2fiff = {'ecg': FIFF.FIFFV_ECG_CH, 'eeg': FIFF.FIFFV_EEG_CH, 'emg': FIFF.FIFFV_EMG_CH, 'eog': FIFF.FIFFV_EOG_CH, 'exci': FIFF.FIFFV_EXCI_CH, 'ias': FIFF.FIFFV_IAS_CH, 'misc': FIFF.FIFFV_MISC_CH, 'resp': FIFF.FIFFV_RESP_CH, 'seeg': FIFF.FIFFV_SEEG_CH, 'stim': FIFF.FIFFV_STIM_CH, 'syst': FIFF.FIFFV_SYST_CH, 'bio': FIFF.FIFFV_BIO_CH, 'ecog': FIFF.FIFFV_ECOG_CH, 'fnirs_cw_amplitude': FIFF.FIFFV_FNIRS_CH, 'fnirs_od': FIFF.FIFFV_FNIRS_CH, 'hbo': FIFF.FIFFV_FNIRS_CH, 'hbr': FIFF.FIFFV_FNIRS_CH} _human2unit = {'ecg': FIFF.FIFF_UNIT_V, 'eeg': FIFF.FIFF_UNIT_V, 'emg': FIFF.FIFF_UNIT_V, 'eog': FIFF.FIFF_UNIT_V, 'exci': FIFF.FIFF_UNIT_NONE, 'ias': FIFF.FIFF_UNIT_NONE, 'misc': FIFF.FIFF_UNIT_V, 'resp': FIFF.FIFF_UNIT_NONE, 'seeg': FIFF.FIFF_UNIT_V, 'stim': FIFF.FIFF_UNIT_NONE, 'syst': FIFF.FIFF_UNIT_NONE, 'bio': FIFF.FIFF_UNIT_V, 'ecog': FIFF.FIFF_UNIT_V, 'fnirs_cw_amplitude': FIFF.FIFF_UNIT_V, 'fnirs_od': FIFF.FIFF_UNIT_NONE, 'hbo': FIFF.FIFF_UNIT_MOL, 'hbr': FIFF.FIFF_UNIT_MOL} _unit2human = {FIFF.FIFF_UNIT_V: 'V', FIFF.FIFF_UNIT_T: 'T', FIFF.FIFF_UNIT_T_M: 'T/m', FIFF.FIFF_UNIT_MOL: 'M', FIFF.FIFF_UNIT_NONE: 'NA', FIFF.FIFF_UNIT_CEL: 'C'} def _check_set(ch, projs, ch_type): """Ensure type change is compatible with projectors.""" new_kind = _human2fiff[ch_type] if ch['kind'] != new_kind: for proj in projs: if ch['ch_name'] in proj['data']['col_names']: raise RuntimeError('Cannot change channel type for channel %s ' 'in projector "%s"' % (ch['ch_name'], proj['desc'])) ch['kind'] = new_kind class SetChannelsMixin(MontageMixin): """Mixin class for Raw, Evoked, Epochs.""" @verbose def set_eeg_reference(self, ref_channels='average', projection=False, ch_type='auto', forward=None, verbose=None): """Specify which reference to use for EEG data. Use this function to explicitly specify the desired reference for EEG. This can be either an existing electrode or a new virtual channel. This function will re-reference the data according to the desired reference. Parameters ---------- %(set_eeg_reference_ref_channels)s %(set_eeg_reference_projection)s %(set_eeg_reference_ch_type)s %(set_eeg_reference_forward)s %(verbose_meth)s Returns ------- inst : instance of Raw | Epochs | Evoked Data with EEG channels re-referenced. If ``ref_channels='average'`` and ``projection=True`` a projection will be added instead of directly re-referencing the data. %(set_eeg_reference_see_also_notes)s """ from ..io.reference import set_eeg_reference return set_eeg_reference(self, ref_channels=ref_channels, copy=False, projection=projection, ch_type=ch_type, forward=forward)[0] def _get_channel_positions(self, picks=None): """Get channel locations from info. Parameters ---------- picks : str | list | slice | None None gets good data indices. Notes ----- .. versionadded:: 0.9.0 """ picks = _picks_to_idx(self.info, picks) chs = self.info['chs'] pos = np.array([chs[k]['loc'][:3] for k in picks]) n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0) if n_zero > 1: # XXX some systems have origin (0, 0, 0) raise ValueError('Could not extract channel positions for ' '{} channels'.format(n_zero)) return pos def _set_channel_positions(self, pos, names): """Update channel locations in info. Parameters ---------- pos : array-like | np.ndarray, shape (n_points, 3) The channel positions to be set. names : list of str The names of the channels to be set. Notes ----- .. 
versionadded:: 0.9.0 """ if len(pos) != len(names): raise ValueError('Number of channel positions not equal to ' 'the number of names given.') pos = np.asarray(pos, dtype=np.float64) if pos.shape[-1] != 3 or pos.ndim != 2: msg = ('Channel positions must have the shape (n_points, 3) ' 'not %s.' % (pos.shape,)) raise ValueError(msg) for name, p in zip(names, pos): if name in self.ch_names: idx = self.ch_names.index(name) self.info['chs'][idx]['loc'][:3] = p else: msg = ('%s was not found in the info. Cannot be updated.' % name) raise ValueError(msg) @verbose def set_channel_types(self, mapping, verbose=None): """Define the sensor type of channels. Parameters ---------- mapping : dict A dictionary mapping a channel to a sensor type (str), e.g., ``{'EEG061': 'eog'}``. %(verbose_meth)s Returns ------- inst : instance of Raw | Epochs | Evoked The instance (modified in place). .. versionchanged:: 0.20 Return the instance. Notes ----- The following sensor types are accepted: ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog, hbo, hbr, fnirs_cw_amplitude, fnirs_od .. versionadded:: 0.9.0 """ ch_names = self.info['ch_names'] # first check and assemble clean mappings of index and name unit_changes = dict() for ch_name, ch_type in mapping.items(): if ch_name not in ch_names: raise ValueError("This channel name (%s) doesn't exist in " "info." % ch_name) c_ind = ch_names.index(ch_name) if ch_type not in _human2fiff: raise ValueError('This function cannot change to this ' 'channel type: %s. Accepted channel types ' 'are %s.' % (ch_type, ", ".join(sorted(_human2unit.keys())))) # Set sensor type _check_set(self.info['chs'][c_ind], self.info['projs'], ch_type) unit_old = self.info['chs'][c_ind]['unit'] unit_new = _human2unit[ch_type] if unit_old not in _unit2human: raise ValueError("Channel '%s' has unknown unit (%s). Please " "fix the measurement info of your data." % (ch_name, unit_old)) if unit_old != _human2unit[ch_type]: this_change = (_unit2human[unit_old], _unit2human[unit_new]) if this_change not in unit_changes: unit_changes[this_change] = list() unit_changes[this_change].append(ch_name) self.info['chs'][c_ind]['unit'] = _human2unit[ch_type] if ch_type in ['eeg', 'seeg', 'ecog']: coil_type = FIFF.FIFFV_COIL_EEG elif ch_type == 'hbo': coil_type = FIFF.FIFFV_COIL_FNIRS_HBO elif ch_type == 'hbr': coil_type = FIFF.FIFFV_COIL_FNIRS_HBR elif ch_type == 'fnirs_cw_amplitude': coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE elif ch_type == 'fnirs_od': coil_type = FIFF.FIFFV_COIL_FNIRS_OD else: coil_type = FIFF.FIFFV_COIL_NONE self.info['chs'][c_ind]['coil_type'] = coil_type msg = "The unit for channel(s) {0} has changed from {1} to {2}." for this_change, names in unit_changes.items(): warn(msg.format(", ".join(sorted(names)), *this_change)) return self @fill_doc def rename_channels(self, mapping): """Rename channels. Parameters ---------- %(rename_channels_mapping)s Returns ------- inst : instance of Raw | Epochs | Evoked The instance (modified in place). .. versionchanged:: 0.20 Return the instance. Notes ----- .. versionadded:: 0.9.0 """ rename_channels(self.info, mapping) return self @verbose def plot_sensors(self, kind='topomap', ch_type=None, title=None, show_names=False, ch_groups=None, to_sphere=True, axes=None, block=False, show=True, sphere=None, verbose=None): """Plot sensor positions. Parameters ---------- kind : str Whether to plot the sensors as 3d, topomap or as an interactive sensor selection dialog. Available options 'topomap', '3d', 'select'. 
If 'select', a set of channels can be selected interactively by using the lasso selector or by clicking while holding the control key. The selected channels are returned along with the figure instance. Defaults to 'topomap'. ch_type : None | str The channel type to plot. Available options 'mag', 'grad', 'eeg', 'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad, eeg, seeg and ecog channels are plotted. If None (default), then channels are chosen in the order given above. title : str | None Title for the figure. If None (default), the title is set to ``'Sensor positions (%%s)' %% ch_type``. show_names : bool | array of str Whether to display all channel names. If an array, only the channel names in the array are shown. Defaults to False. ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None Channel groups for coloring the sensors. If None (default), default coloring scheme is used. If 'position', the sensors are divided into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If array, the channels are divided by picks given in the array. .. versionadded:: 0.13.0 to_sphere : bool Whether to project the 3d locations to a sphere. When False, the sensor array appears as if viewed from directly above the subject's head. Has no effect when kind='3d'. Defaults to True. .. versionadded:: 0.14.0 axes : instance of Axes | instance of Axes3D | None Axes to draw the sensors to. If ``kind='3d'``, axes must be an instance of Axes3D. If None (default), a new axes will be created. .. versionadded:: 0.13.0 block : bool Whether to halt program execution until the figure is closed. Defaults to False. .. versionadded:: 0.13.0 show : bool Show figure if True. Defaults to True. %(topomap_sphere_auto)s %(verbose_meth)s Returns ------- fig : instance of Figure Figure containing the sensor topography. selection : list A list of selected channels. Only returned if ``kind=='select'``. See Also -------- mne.viz.plot_layout Notes ----- This function plots the sensor locations from the info structure using matplotlib. For drawing the sensors using mayavi see :func:`mne.viz.plot_alignment`. .. versionadded:: 0.12.0 """ from ..viz.utils import plot_sensors return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title, show_names=show_names, ch_groups=ch_groups, to_sphere=to_sphere, axes=axes, block=block, show=show, sphere=sphere, verbose=verbose) @verbose def anonymize(self, daysback=None, keep_his=False, verbose=None): """Anonymize measurement information in place. Parameters ---------- %(anonymize_info_parameters)s %(verbose)s Returns ------- inst : instance of Raw | Epochs | Evoked The modified instance. Notes ----- %(anonymize_info_notes)s .. versionadded:: 0.13.0 """ anonymize_info(self.info, daysback=daysback, keep_his=keep_his, verbose=verbose) self.set_meas_date(self.info['meas_date']) # unify annot update return self def set_meas_date(self, meas_date): """Set the measurement start date. Parameters ---------- meas_date : datetime | float | tuple | None The new measurement date. If datetime object, it must be timezone-aware and in UTC. A tuple of (seconds, microseconds) or float (alias for ``(meas_date, 0)``) can also be passed and a datetime object will be automatically created. If None, will remove the time reference. Returns ------- inst : instance of Raw | Epochs | Evoked The modified raw instance. Operates in place.
See Also -------- mne.io.Raw.anonymize Notes ----- If you want to remove all time references in the file, call :func:`mne.io.anonymize_info(inst.info) <mne.io.anonymize_info>` after calling ``inst.set_meas_date(None)``. .. versionadded:: 0.20 """ from ..annotations import _handle_meas_date meas_date = _handle_meas_date(meas_date) self.info['meas_date'] = meas_date # clear file_id and meas_id if needed if meas_date is None: for key in ('file_id', 'meas_id'): value = self.info.get(key) if value is not None: assert 'msecs' not in value value['secs'] = DATE_NONE[0] value['usecs'] = DATE_NONE[1] # The following copy is needed for a test CTF dataset # otherwise value['machid'][:] = 0 would suffice _tmp = value['machid'].copy() _tmp[:] = 0 value['machid'] = _tmp if hasattr(self, 'annotations'): self.annotations._orig_time = meas_date return self class UpdateChannelsMixin(object): """Mixin class for Raw, Evoked, Epochs, AverageTFR.""" @verbose def pick_types(self, meg=None, eeg=False, stim=False, eog=False, ecg=False, emg=False, ref_meg='auto', misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, seeg=False, dipole=False, gof=False, bio=False, ecog=False, fnirs=False, csd=False, include=(), exclude='bads', selection=None, verbose=None): """Pick some channels by type and names. Parameters ---------- meg : bool | str If True include MEG channels. If string it can be 'mag', 'grad', 'planar1' or 'planar2' to select only magnetometers, all gradiometers, or a specific type of gradiometer. eeg : bool If True include EEG channels. stim : bool If True include stimulus channels. eog : bool If True include EOG channels. ecg : bool If True include ECG channels. emg : bool If True include EMG channels. ref_meg : bool | str If True include CTF / 4D reference channels. If 'auto', reference channels are included if compensations are present and ``meg`` is not False. Can also be the string options for the ``meg`` parameter. misc : bool If True include miscellaneous analog channels. resp : bool If True include response-trigger channel. For some MEG systems this is separate from the stim channel. chpi : bool If True include continuous HPI coil channels. exci : bool Flux excitation channel used to be a stimulus channel. ias : bool Internal Active Shielding data (maybe on Triux only). syst : bool System status channel information (on Triux systems only). seeg : bool Stereotactic EEG channels. dipole : bool Dipole time course channels. gof : bool Dipole goodness of fit channels. bio : bool Bio channels. ecog : bool Electrocorticography channels. fnirs : bool | str Functional near-infrared spectroscopy channels. If True include all fNIRS channels. If False (default) include none. If string it can be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to include channels measuring deoxyhemoglobin). csd : bool EEG-CSD channels. include : list of str List of additional channels to include. If empty do not include any. exclude : list of str | str List of channels to exclude. If 'bads' (default), exclude channels in ``info['bads']``. selection : list of str Restrict sensor channels (MEG, EEG) to this list of channel names. %(verbose_meth)s Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. See Also -------- pick_channels Notes ----- .. 
versionadded:: 0.9.0 """ idx = pick_types( self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg, ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci, ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio, ecog=ecog, fnirs=fnirs, include=include, exclude=exclude, selection=selection) self._pick_drop_channels(idx) # remove dropped channel types from reject and flat if getattr(self, 'reject', None) is not None: # use list(self.reject) to avoid RuntimeError for changing # dictionary size during iteration for ch_type in list(self.reject): if ch_type not in self: del self.reject[ch_type] if getattr(self, 'flat', None) is not None: for ch_type in list(self.flat): if ch_type not in self: del self.flat[ch_type] return self def pick_channels(self, ch_names, ordered=False): """Pick some channels. Parameters ---------- ch_names : list The list of channels to select. ordered : bool If True (default False), ensure that the order of the channels in the modified instance matches the order of ``ch_names``. .. versionadded:: 0.20.0 Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. See Also -------- drop_channels pick_types reorder_channels Notes ----- The channel names given are assumed to be a set, i.e. the order does not matter. The original order of the channels is preserved. You can use ``reorder_channels`` to set channel order if necessary. .. versionadded:: 0.9.0 """ picks = pick_channels(self.info['ch_names'], ch_names, ordered=ordered) return self._pick_drop_channels(picks) @fill_doc def pick(self, picks, exclude=()): """Pick a subset of channels. Parameters ---------- %(picks_all)s exclude : list | str Set of channels to exclude, only used when picking based on types (e.g., exclude="bads" when picks="meg"). Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. """ picks = _picks_to_idx(self.info, picks, 'all', exclude, allow_empty=False) return self._pick_drop_channels(picks) def reorder_channels(self, ch_names): """Reorder channels. Parameters ---------- ch_names : list The desired channel order. Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. See Also -------- drop_channels pick_types pick_channels Notes ----- Channel names must be unique. Channels that are not in ``ch_names`` are dropped. .. versionadded:: 0.16.0 """ _check_excludes_includes(ch_names) idx = list() for ch_name in ch_names: ii = self.ch_names.index(ch_name) if ii in idx: raise ValueError('Channel name repeated: %s' % (ch_name,)) idx.append(ii) return self._pick_drop_channels(idx) def drop_channels(self, ch_names): """Drop channel(s). Parameters ---------- ch_names : iterable or str Iterable (e.g. list) of channel name(s) or channel name to remove. Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. See Also -------- reorder_channels pick_channels pick_types Notes ----- .. versionadded:: 0.9.0 """ if isinstance(ch_names, str): ch_names = [ch_names] try: all_str = all([isinstance(ch, str) for ch in ch_names]) except TypeError: raise ValueError("'ch_names' must be iterable, got " "type {} ({}).".format(type(ch_names), ch_names)) if not all_str: raise ValueError("Each element in 'ch_names' must be str, got " "{}.".format([type(ch) for ch in ch_names])) missing = [ch for ch in ch_names if ch not in self.ch_names] if len(missing) > 0: msg = "Channel(s) {0} not found, nothing dropped." 
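# all missing names were collected above so the error can report them in one pass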
raise ValueError(msg.format(", ".join(missing))) bad_idx = [self.ch_names.index(ch) for ch in ch_names if ch in self.ch_names] idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx) return self._pick_drop_channels(idx) def _pick_drop_channels(self, idx): # avoid circular imports from ..io import BaseRaw from ..time_frequency import AverageTFR, EpochsTFR if not isinstance(self, BaseRaw): _check_preload(self, 'adding, dropping, or reordering channels') if getattr(self, 'picks', None) is not None: self.picks = self.picks[idx] if getattr(self, '_read_picks', None) is not None: self._read_picks = [r[idx] for r in self._read_picks] if hasattr(self, '_cals'): self._cals = self._cals[idx] pick_info(self.info, idx, copy=False) if getattr(self, '_projector', None) is not None: self._projector = self._projector[idx][:, idx] # All others (Evoked, Epochs, Raw) have chs axis=-2 axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2 if hasattr(self, '_data'): # skip non-preloaded Raw self._data = self._data.take(idx, axis=axis) else: assert isinstance(self, BaseRaw) and not self.preload self._pick_projs() return self def _pick_projs(self): """Keep only projectors which apply to at least 1 data channel.""" drop_idx = [] for idx, proj in enumerate(self.info['projs']): if not set(self.info['ch_names']) & set(proj['data']['col_names']): drop_idx.append(idx) for idx in drop_idx: logger.info(f"Removing projector {self.info['projs'][idx]}") if drop_idx and hasattr(self, 'del_proj'): self.del_proj(drop_idx) return self def add_channels(self, add_list, force_update_info=False): """Append new channels to the instance. Parameters ---------- add_list : list A list of objects to append to self. Must contain all the same type as the current object. force_update_info : bool If True, force the info for objects to be appended to match the values in ``self``. This should generally only be used when adding stim channels for which important metadata won't be overwritten. .. versionadded:: 0.12 Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. See Also -------- drop_channels Notes ----- If ``self`` is a Raw instance that has been preloaded into a :obj:`numpy.memmap` instance, the memmap will be resized. 
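Examples
--------
A minimal sketch, assuming ``raw`` and ``raw_stim`` are preloaded Raw
objects with identical times and sampling rate (hypothetical names)::

    >>> raw.add_channels([raw_stim], force_update_info=True)  # doctest: +SKIP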
""" # avoid circular imports from ..io import BaseRaw, _merge_info from ..epochs import BaseEpochs _validate_type(add_list, (list, tuple), 'Input') # Object-specific checks for inst in add_list + [self]: _check_preload(inst, "adding channels") if isinstance(self, BaseRaw): con_axis = 0 comp_class = BaseRaw elif isinstance(self, BaseEpochs): con_axis = 1 comp_class = BaseEpochs else: con_axis = 0 comp_class = type(self) for inst in add_list: _validate_type(inst, comp_class, 'All input') data = [inst._data for inst in [self] + add_list] # Make sure that all dimensions other than channel axis are the same compare_axes = [i for i in range(data[0].ndim) if i != con_axis] shapes = np.array([dat.shape for dat in data])[:, compare_axes] for shape in shapes: if not ((shapes[0] - shape) == 0).all(): raise AssertionError('All data dimensions except channels ' 'must match, got %s != %s' % (shapes[0], shape)) del shapes # Create final data / info objects infos = [self.info] + [inst.info for inst in add_list] new_info = _merge_info(infos, force_update_to_first=force_update_info) # Now update the attributes if isinstance(self._data, np.memmap) and con_axis == 0 and \ sys.platform != 'darwin': # resizing not available--no mremap # Use a resize and fill in other ones out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:] n_bytes = np.prod(out_shape) * self._data.dtype.itemsize self._data.flush() self._data.base.resize(n_bytes) self._data = np.memmap(self._data.filename, mode='r+', dtype=self._data.dtype, shape=out_shape) assert self._data.shape == out_shape assert self._data.nbytes == n_bytes offset = len(data[0]) for d in data[1:]: this_len = len(d) self._data[offset:offset + this_len] = d offset += this_len else: self._data = np.concatenate(data, axis=con_axis) self.info = new_info if isinstance(self, BaseRaw): self._cals = np.concatenate([getattr(inst, '_cals') for inst in [self] + add_list]) # We should never use these since data are preloaded, let's just # set it to something large and likely to break (2 ** 31 - 1) extra_idx = [2147483647] * sum(info['nchan'] for info in infos[1:]) assert all(len(r) == infos[0]['nchan'] for r in self._read_picks) self._read_picks = [ np.concatenate([r, extra_idx]) for r in self._read_picks] assert all(len(r) == self.info['nchan'] for r in self._read_picks) return self class InterpolationMixin(object): """Mixin class for Raw, Evoked, Epochs.""" @verbose def interpolate_bads(self, reset_bads=True, mode='accurate', origin='auto', method=None, verbose=None): """Interpolate bad MEG and EEG channels. Operates in place. Parameters ---------- reset_bads : bool If True, remove the bads from info. mode : str Either ``'accurate'`` or ``'fast'``, determines the quality of the Legendre polynomial expansion used for interpolation of channels using the minimum-norm method. origin : array-like, shape (3,) | str Origin of the sphere in the head coordinate frame and in meters. Can be ``'auto'`` (default), which means a head-digitization-based origin fit. .. versionadded:: 0.17 method : dict Method to use for each channel type. Currently only the key "eeg" has multiple options: - ``"spline"`` (default) Use spherical spline interpolation. - ``"MNE"`` Use minimum-norm projection to a sphere and back. This is the method used for MEG channels. The value for "meg" is "MNE", and the value for "fnirs" is "nearest". The default (None) is thus an alias for:: method=dict(meg="MNE", eeg="spline", fnirs="nearest") .. 
versionadded:: 0.21 %(verbose_meth)s Returns ------- inst : instance of Raw, Epochs, or Evoked The modified instance. Notes ----- .. versionadded:: 0.9.0 """ from ..bem import _check_origin from .interpolation import _interpolate_bads_eeg,\ _interpolate_bads_meeg, _interpolate_bads_nirs _check_preload(self, "interpolation") method = _handle_default('interpolation_method', method) for key in method: _check_option('method[key]', key, ('meg', 'eeg', 'fnirs')) _check_option("method['eeg']", method['eeg'], ('spline', 'MNE')) _check_option("method['meg']", method['meg'], ('MNE',)) _check_option("method['fnirs']", method['fnirs'], ('nearest',)) if len(self.info['bads']) == 0: warn('No bad channels to interpolate. Doing nothing...') return self logger.info('Interpolating bad channels') origin = _check_origin(origin, self.info) if method['eeg'] == 'spline': _interpolate_bads_eeg(self, origin=origin) eeg_mne = False else: eeg_mne = True _interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne) _interpolate_bads_nirs(self) if reset_bads is True: self.info['bads'] = [] return self @fill_doc def rename_channels(info, mapping): """Rename channels. .. warning:: The channel names must have at most 15 characters. Parameters ---------- info : dict Measurement info to modify. %(rename_channels_mapping)s """ _validate_type(info, Info, 'info') info._check_consistency() bads = list(info['bads']) # make our own local copies ch_names = list(info['ch_names']) # first check and assemble clean mappings of index and name if isinstance(mapping, dict): orig_names = sorted(list(mapping.keys())) missing = [orig_name not in ch_names for orig_name in orig_names] if any(missing): raise ValueError("Channel name(s) in mapping missing from info: " "%s" % np.array(orig_names)[np.array(missing)]) new_names = [(ch_names.index(ch_name), new_name) for ch_name, new_name in mapping.items()] elif callable(mapping): new_names = [(ci, mapping(ch_name)) for ci, ch_name in enumerate(ch_names)] else: raise ValueError('mapping must be callable or dict, not %s' % (type(mapping),)) # check we got all strings out of the mapping for new_name in new_names: _validate_type(new_name[1], 'str', 'New channel mappings') bad_new_names = [name for _, name in new_names if len(name) > 15] if len(bad_new_names): raise ValueError('Channel names cannot be longer than 15 ' 'characters. These channel names are not ' 'valid: %s' % bad_new_names) # do the remapping locally for c_ind, new_name in new_names: for bi, bad in enumerate(bads): if bad == ch_names[c_ind]: bads[bi] = new_name ch_names[c_ind] = new_name # check that all the channel names are unique if len(ch_names) != len(np.unique(ch_names)): raise ValueError('New channel names are not unique, renaming failed') # do the remapping in info info['bads'] = bads for ch, ch_name in zip(info['chs'], ch_names): ch['ch_name'] = ch_name info._update_redundant() info._check_consistency() def _recursive_flatten(cell, dtype): """Unpack mat files in Python.""" if len(cell) > 0: while not isinstance(cell[0], dtype): cell = [c for d in cell for c in d] return cell @fill_doc def read_ch_adjacency(fname, picks=None): """Parse FieldTrip neighbors .mat file. More information on these neighbor definitions can be found on the related `FieldTrip documentation pages <http://www.fieldtriptoolbox.org/template/neighbours/>`__. Parameters ---------- fname : str The file name. Example: 'neuromag306mag', 'neuromag306planar', 'ctf275', 'biosemi64', etc. %(picks_all)s Picks must match the template.
Returns ------- ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels) The adjacency matrix. ch_names : list The list of channel names present in the adjacency matrix. See Also -------- find_ch_adjacency Notes ----- This function is closely related to :func:`find_ch_adjacency`. If you don't know the correct file for the neighbor definitions, :func:`find_ch_adjacency` can compute the adjacency matrix from 2d sensor locations. """ from scipy.io import loadmat if not op.isabs(fname): templates_dir = op.realpath(op.join(op.dirname(__file__), 'data', 'neighbors')) templates = os.listdir(templates_dir) for f in templates: if f == fname: break if f == fname + '_neighb.mat': fname += '_neighb.mat' break else: raise ValueError('I do not know about this neighbor ' 'template: "{}"'.format(fname)) fname = op.join(templates_dir, fname) nb = loadmat(fname)['neighbours'] ch_names = _recursive_flatten(nb['label'], str) picks = _picks_to_idx(len(ch_names), picks) neighbors = [_recursive_flatten(c, str) for c in nb['neighblabel'].flatten()] assert len(ch_names) == len(neighbors) adjacency = _ch_neighbor_adjacency(ch_names, neighbors) # picking before constructing matrix is buggy adjacency = adjacency[picks][:, picks] ch_names = [ch_names[p] for p in picks] return adjacency, ch_names def _ch_neighbor_adjacency(ch_names, neighbors): """Compute sensor adjacency matrix. Parameters ---------- ch_names : list of str The channel names. neighbors : list of list A list of lists of channel names; the neighbors to which the channels in ch_names are connected. Must be of the same length as ch_names. Returns ------- ch_adjacency : scipy.sparse matrix The adjacency matrix. """ if len(ch_names) != len(neighbors): raise ValueError('`ch_names` and `neighbors` must ' 'have the same length') set_neighbors = {c for d in neighbors for c in d} rest = set_neighbors - set(ch_names) if len(rest) > 0: raise ValueError('Some of your neighbors are not present in the ' 'list of channel names') for neigh in neighbors: # reject any entry that is not a list of str if (not isinstance(neigh, list) or not all(isinstance(c, str) for c in neigh)): raise ValueError('`neighbors` must be a list of lists of str') ch_adjacency = np.eye(len(ch_names), dtype=bool) for ii, neigbs in enumerate(neighbors): ch_adjacency[ii, [ch_names.index(i) for i in neigbs]] = True ch_adjacency = sparse.csr_matrix(ch_adjacency) return ch_adjacency def find_ch_adjacency(info, ch_type): """Find the adjacency matrix for the given channels. This function tries to infer the appropriate adjacency matrix template for the given channels. If a template is not found, the adjacency matrix is computed using Delaunay triangulation based on 2d sensor locations. Parameters ---------- info : instance of Info The measurement info. ch_type : str | None The channel type for computing the adjacency matrix. Currently supports 'mag', 'grad', 'eeg' and None. If None, the info must contain only one channel type. Returns ------- ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels) The adjacency matrix. ch_names : list The list of channel names present in the adjacency matrix. See Also -------- read_ch_adjacency Notes ----- .. versionadded:: 0.15 Automatic detection of an appropriate adjacency matrix template only works for MEG data at the moment. This means that the adjacency matrix is always computed for EEG data and never loaded from a template file. If you want to load a template for a given montage use :func:`read_ch_adjacency` directly.
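Examples
--------
A minimal sketch, assuming ``info`` comes from a Neuromag VectorView
recording so that a bundled template applies::

    >>> adjacency, ch_names = find_ch_adjacency(info, ch_type='grad')  # doctest: +SKIP
    >>> adjacency.shape[0] == len(ch_names)  # doctest: +SKIP
    True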
""" if ch_type is None: picks = channel_indices_by_type(info) if sum([len(p) != 0 for p in picks.values()]) != 1: raise ValueError('info must contain only one channel type if ' 'ch_type is None.') ch_type = channel_type(info, 0) else: _check_option('ch_type', ch_type, ['mag', 'grad', 'eeg']) (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types, has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils, has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info) conn_name = None if has_vv_mag and ch_type == 'mag': conn_name = 'neuromag306mag' elif has_vv_grad and ch_type == 'grad': conn_name = 'neuromag306planar' elif has_neuromag_122_grad: conn_name = 'neuromag122' elif has_4D_mag: if 'MEG 248' in info['ch_names']: idx = info['ch_names'].index('MEG 248') grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG if ch_type == 'grad' and grad: conn_name = 'bti248grad' elif ch_type == 'mag' and mag: conn_name = 'bti248' elif 'MEG 148' in info['ch_names'] and ch_type == 'mag': idx = info['ch_names'].index('MEG 148') if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG: conn_name = 'bti148' elif has_CTF_grad and ch_type == 'mag': if info['nchan'] < 100: conn_name = 'ctf64' elif info['nchan'] > 200: conn_name = 'ctf275' else: conn_name = 'ctf151' elif n_kit_grads > 0: from ..io.kit.constants import KIT_NEIGHBORS conn_name = KIT_NEIGHBORS.get(info['kit_system_id']) if conn_name is not None: logger.info('Reading adjacency matrix for %s.' % conn_name) return read_ch_adjacency(conn_name) logger.info('Could not find a adjacency matrix for the data. ' 'Computing adjacency based on Delaunay triangulations.') return _compute_ch_adjacency(info, ch_type) def _compute_ch_adjacency(info, ch_type): """Compute channel adjacency matrix using Delaunay triangulations. Parameters ---------- info : instance of mne.measuerment_info.Info The measurement info. ch_type : str The channel type for computing the adjacency matrix. Currently supports 'mag', 'grad' and 'eeg'. Returns ------- ch_adjacency : scipy.sparse matrix, shape (n_channels, n_channels) The adjacency matrix. ch_names : list The list of channel names present in adjacency matrix. """ from scipy.spatial import Delaunay from .. import spatial_tris_adjacency from ..channels.layout import _find_topomap_coords, _pair_grad_sensors combine_grads = (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in np.unique([ch['coil_type'] for ch in info['chs']])) picks = dict(_picks_by_type(info, exclude=[]))[ch_type] ch_names = [info['ch_names'][pick] for pick in picks] if combine_grads: pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[]) if len(pairs) != len(picks): raise RuntimeError('Cannot find a pair for some of the ' 'gradiometers. 
Cannot compute adjacency ' 'matrix.') # only for one of the pair xy = _find_topomap_coords(info, picks[::2], sphere=HEAD_SIZE_DEFAULT) else: xy = _find_topomap_coords(info, picks, sphere=HEAD_SIZE_DEFAULT) tri = Delaunay(xy) neighbors = spatial_tris_adjacency(tri.simplices) if combine_grads: ch_adjacency = np.eye(len(picks), dtype=bool) for idx, neigbs in zip(neighbors.row, neighbors.col): for ii in range(2): # make sure each pair is included for jj in range(2): ch_adjacency[idx * 2 + ii, neigbs * 2 + jj] = True ch_adjacency[idx * 2 + ii, idx * 2 + jj] = True # pair ch_adjacency = sparse.csr_matrix(ch_adjacency) else: ch_adjacency = sparse.lil_matrix(neighbors) ch_adjacency.setdiag(np.repeat(1, ch_adjacency.shape[0])) ch_adjacency = ch_adjacency.tocsr() return ch_adjacency, ch_names def fix_mag_coil_types(info, use_cal=False): """Fix magnetometer coil types. Parameters ---------- info : dict The info dict to correct. Corrections are done in-place. use_cal : bool If True, further refine the check for old coil types by checking ``info['chs'][ii]['cal']``. Notes ----- This function changes magnetometer coil types 3022 (T1: SQ20483N) and 3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition records in the info structure. Neuromag Vectorview systems can contain magnetometers with two different coil sizes (3022 and 3023 vs. 3024). The systems incorporating coils of type 3024 were introduced last and are used at the majority of MEG sites. At some sites with 3024 magnetometers, the data files have still defined the magnetometers to be of type 3022 to ensure compatibility with older versions of Neuromag software. In the MNE software as well as in the present version of Neuromag software coil type 3024 is fully supported. Therefore, it is now safe to upgrade the data files to use the true coil type. .. note:: The effect of the difference between the coil sizes on the current estimates computed by the MNE software is very small. Therefore the use of ``fix_mag_coil_types`` is not mandatory. """ old_mag_inds = _get_T1T2_mag_inds(info, use_cal) for ii in old_mag_inds: info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3 logger.info('%d of %d magnetometer types replaced with T3.' % (len(old_mag_inds), len(pick_types(info, meg='mag')))) info._check_consistency() def _get_T1T2_mag_inds(info, use_cal=False): """Find T1/T2 magnetometer coil types.""" picks = pick_types(info, meg='mag') old_mag_inds = [] # From email exchanges, systems with the larger T2 coil only use the cal # value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10 # (Triux). So we can use a simple check for > 3e-11. 
for ii in picks: ch = info['chs'][ii] if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2): if use_cal: if ch['cal'] > 3e-11: old_mag_inds.append(ii) else: old_mag_inds.append(ii) return old_mag_inds def _get_ch_info(info): """Get channel info for inferring acquisition device.""" chs = info['chs'] # Only take first 16 bits, as higher bits store CTF comp order coil_types = {ch['coil_type'] & 0xFFFF for ch in chs} channel_types = {ch['kind'] for ch in chs} has_vv_mag = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2, FIFF.FIFFV_COIL_VV_MAG_T3]) has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFFV_COIL_VV_PLANAR_T2, FIFF.FIFFV_COIL_VV_PLANAR_T3]) has_neuromag_122_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_NM_122]) is_old_vv = ' ' in chs[0]['ch_name'] has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG, FIFF.FIFFV_COIL_CTF_REF_GRAD, FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD) has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or (FIFF.FIFFV_MEG_CH in channel_types and any(k in ctf_other_types for k in coil_types))) # hack due to MNE-C bug in IO of CTF # only take first 16 bits, as higher bits store CTF comp order n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD for ch in chs) has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad, n_kit_grads]) has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and FIFF.FIFFV_EEG_CH in channel_types) has_eeg_coils_and_meg = has_eeg_coils and has_any_meg has_eeg_coils_only = has_eeg_coils and not has_any_meg has_csd_coils = (FIFF.FIFFV_COIL_EEG_CSD in coil_types and FIFF.FIFFV_EEG_CH in channel_types) return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types, has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils, has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad, has_csd_coils) def make_1020_channel_selections(info, midline="z"): """Return dict mapping from ROI names to lists of picks for 10/20 setups. This passes through all channel names, and uses a simple heuristic to separate channel names into three Region of Interest-based selections: Left, Midline and Right. The heuristic is that channels ending in any of the characters in ``midline`` are filed under that heading, otherwise those ending in odd numbers under "Left", those in even numbers under "Right". Other channels are ignored. This is appropriate for 10/20 files, but not for other channel naming conventions. If an info object is provided, lists are sorted from posterior to anterior. Parameters ---------- info : instance of Info Where to obtain the channel names from. The picks will be in relation to the position in ``info["ch_names"]``. If possible, these lists will be sorted by the y position of the channel locations, i.e., from back to front. midline : str Names ending in any of these characters are stored under the ``Midline`` key. Defaults to 'z'. Note that capitalization is ignored. Returns ------- selections : dict A dictionary mapping from ROI names to lists of picks (integers).
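Examples
--------
A minimal sketch, assuming ``info`` uses standard 10/20 names such as
'Fz', 'C3', or 'C4' (hypothetical output)::

    >>> selections = make_1020_channel_selections(info)  # doctest: +SKIP
    >>> sorted(selections)  # doctest: +SKIP
    ['Left', 'Midline', 'Right']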
""" _validate_type(info, "info") try: from .layout import find_layout layout = find_layout(info) pos = layout.pos ch_names = layout.names except RuntimeError: # no channel positions found ch_names = info["ch_names"] pos = None selections = dict(Left=[], Midline=[], Right=[]) for pick, channel in enumerate(ch_names): last_char = channel[-1].lower() # in 10/20, last char codes hemisphere if last_char in midline: selection = "Midline" elif last_char.isdigit(): selection = "Left" if int(last_char) % 2 else "Right" else: # ignore the channel continue selections[selection].append(pick) if pos is not None: # sort channels from front to center # (y-coordinate of the position info in the layout) selections = {selection: np.array(picks)[pos[picks, 1].argsort()] for selection, picks in selections.items()} return selections def combine_channels(inst, groups, method='mean', keep_stim=False, drop_bad=False): """Combine channels based on specified channel grouping. Parameters ---------- inst : instance of Raw, Epochs, or Evoked An MNE-Python object to combine the channels for. The object can be of type Raw, Epochs, or Evoked. groups : dict Specifies which channels are aggregated into a single channel, with aggregation method determined by the ``method`` parameter. One new pseudo-channel is made per dict entry; the dict values must be lists of picks (integer indices of ``ch_names``). For example:: groups=dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8]) Note that within a dict entry all channels must have the same type. method : str | callable Which method to use to combine channels. If a :class:`str`, must be one of 'mean', 'median', or 'std' (standard deviation). If callable, the callable must accept one positional input (data of shape ``(n_channels, n_times)``, or ``(n_epochs, n_channels, n_times)``) and return an :class:`array <numpy.ndarray>` of shape ``(n_times,)``, or ``(n_epochs, n_times)``. For example with an instance of Raw or Evoked:: method = lambda data: np.mean(data, axis=0) Another example with an instance of Epochs:: method = lambda data: np.median(data, axis=1) Defaults to ``'mean'``. keep_stim : bool If ``True``, include stimulus channels in the resulting object. Defaults to ``False``. drop_bad : bool If ``True``, drop channels marked as bad before combining. Defaults to ``False``. Returns ------- combined_inst : instance of Raw, Epochs, or Evoked An MNE-Python object of the same type as the input ``inst``, containing one virtual channel for each group in ``groups`` (and, if ``keep_stim`` is ``True``, also containing stimulus channels). """ from ..io import BaseRaw, RawArray from .. 
import BaseEpochs, EpochsArray, Evoked, EvokedArray ch_axis = 1 if isinstance(inst, BaseEpochs) else 0 ch_idx = list(range(inst.info['nchan'])) ch_names = inst.info['ch_names'] ch_types = inst.get_channel_types() inst_data = inst.data if isinstance(inst, Evoked) else inst.get_data() groups = OrderedDict(deepcopy(groups)) # Convert string values of ``method`` into callables # XXX Possibly de-duplicate with _make_combine_callable of mne/viz/utils.py if isinstance(method, str): method_dict = {key: partial(getattr(np, key), axis=ch_axis) for key in ('mean', 'median', 'std')} try: method = method_dict[method] except KeyError: raise ValueError('"method" must be a callable, or one of "mean", ' f'"median", or "std"; got "{method}".') # Instantiate channel info and data new_ch_names, new_ch_types, new_data = [], [], [] if not isinstance(keep_stim, bool): raise TypeError('"keep_stim" must be of type bool, not ' f'{type(keep_stim)}.') if keep_stim: stim_ch_idx = list(pick_types(inst.info, meg=False, stim=True)) if stim_ch_idx: new_ch_names = [ch_names[idx] for idx in stim_ch_idx] new_ch_types = [ch_types[idx] for idx in stim_ch_idx] new_data = [np.take(inst_data, idx, axis=ch_axis) for idx in stim_ch_idx] else: warn('Could not find stimulus channels.') # Get indices of bad channels ch_idx_bad = [] if not isinstance(drop_bad, bool): raise TypeError('"drop_bad" must be of type bool, not ' f'{type(drop_bad)}.') if drop_bad and inst.info['bads']: ch_idx_bad = pick_channels(ch_names, inst.info['bads']) # Check correctness of combinations for this_group, this_picks in groups.items(): # Check if channel indices are out of bounds if not all(idx in ch_idx for idx in this_picks): raise ValueError('Some channel indices are out of bounds.') # Check for heterogeneous sensor type combinations this_ch_type = np.array(ch_types)[this_picks] if len(set(this_ch_type)) > 1: types = ', '.join(set(this_ch_type)) raise ValueError('Cannot combine sensors of different types; ' f'"{this_group}" contains types {types}.') # Remove bad channels these_bads = [idx for idx in this_picks if idx in ch_idx_bad] this_picks = [idx for idx in this_picks if idx not in ch_idx_bad] if these_bads: logger.info('Dropped the following channels in group ' f'{this_group}: {these_bads}') # Warn if combining fewer than 2 channels if len(set(this_picks)) < 2: warn(f'Fewer than 2 channels in group "{this_group}" when ' f'combining by method "{method}".') # If all good create more detailed dict without bad channels groups[this_group] = dict(picks=this_picks, ch_type=this_ch_type[0]) # Combine channels and add them to the new instance for this_group, this_group_dict in groups.items(): new_ch_names.append(this_group) new_ch_types.append(this_group_dict['ch_type']) this_picks = this_group_dict['picks'] this_data = np.take(inst_data, this_picks, axis=ch_axis) new_data.append(method(this_data)) new_data = np.swapaxes(new_data, 0, ch_axis) info = create_info(sfreq=inst.info['sfreq'], ch_names=new_ch_names, ch_types=new_ch_types) if isinstance(inst, BaseRaw): combined_inst = RawArray(new_data, info, first_samp=inst.first_samp, verbose=inst.verbose) elif isinstance(inst, BaseEpochs): combined_inst = EpochsArray(new_data, info, events=inst.events, tmin=inst.times[0], verbose=inst.verbose) elif isinstance(inst, Evoked): combined_inst = EvokedArray(new_data, info, tmin=inst.times[0], verbose=inst.verbose) return combined_inst
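# Usage sketch for ``combine_channels`` (illustration only, not part of the
# module above; channel names and data are invented). It builds a toy Raw
# object with four EEG channels and averages them into two virtual channels,
# one per entry of ``groups``.
import numpy as np
import mne

sfreq = 100.
info = mne.create_info(['EEG1', 'EEG2', 'EEG3', 'EEG4'], sfreq, ch_types='eeg')
raw = mne.io.RawArray(np.random.randn(4, int(sfreq)), info)
combined = mne.channels.combine_channels(
    raw, dict(front=[0, 1], back=[2, 3]), method='mean')
print(combined.ch_names)  # ['front', 'back']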
"""Provide the Subreddit class.""" # pylint: disable=too-many-lines import socket from copy import deepcopy from csv import writer from io import StringIO from json import dumps, loads from os.path import basename, dirname, isfile, join from typing import TYPE_CHECKING, Any, Dict, Generator, Iterator, List, Optional, Union from urllib.parse import urljoin from xml.etree.ElementTree import XML import websocket from prawcore import Redirect from requests import Response from ...const import API_PATH, JPEG_HEADER from ...exceptions import ( ClientException, InvalidFlairTemplateID, MediaPostFailed, RedditAPIException, TooLargeMediaException, WebSocketException, ) from ...util.cache import cachedproperty from ..listing.generator import ListingGenerator from ..listing.mixins import SubredditListingMixin from ..util import permissions_string, stream_generator from .base import RedditBase from .emoji import SubredditEmoji from .mixins import FullnameMixin, MessageableMixin from .modmail import ModmailConversation from .removal_reasons import SubredditRemovalReasons from .rules import SubredditRules from .widgets import SubredditWidgets, WidgetEncoder from .wikipage import WikiPage if TYPE_CHECKING: # pragma: no cover from .... import praw class Subreddit(MessageableMixin, SubredditListingMixin, FullnameMixin, RedditBase): """A class for Subreddits. To obtain an instance of this class for subreddit ``r/redditdev`` execute: .. code-block:: python subreddit = reddit.subreddit("redditdev") While ``r/all`` is not a real subreddit, it can still be treated like one. The following outputs the titles of the 25 hottest submissions in ``r/all``: .. code-block:: python for submission in reddit.subreddit("all").hot(limit=25): print(submission.title) Multiple subreddits can be combined with a ``+`` like so: .. code-block:: python for submission in reddit.subreddit("redditdev+learnpython").top("all"): print(submission) Subreddits can be filtered from combined listings as follows. .. note:: These filters are ignored by certain methods, including :attr:`.comments`, :meth:`.gilded`, and :meth:`.SubredditStream.comments`. .. code-block:: python for submission in reddit.subreddit("all-redditdev").new(): print(submission) **Typical Attributes** This table describes attributes that typically belong to objects of this class. Since attributes are dynamically provided (see :ref:`determine-available-attributes-of-an-object`), there is not a guarantee that these attributes will always be present, nor is this list necessarily complete. ========================= ========================================================== Attribute Description ========================= ========================================================== ``can_assign_link_flair`` Whether users can assign their own link flair. ``can_assign_user_flair`` Whether users can assign their own user flair. ``created_utc`` Time the subreddit was created, represented in `Unix Time`_. ``description`` Subreddit description, in Markdown. ``description_html`` Subreddit description, in HTML. ``display_name`` Name of the subreddit. ``id`` ID of the subreddit. ``name`` Fullname of the subreddit. ``over18`` Whether the subreddit is NSFW. ``public_description`` Description of the subreddit, shown in searches and on the "You must be invited to visit this community" page (if applicable). ``spoilers_enabled`` Whether the spoiler tag feature is enabled. ``subscribers`` Count of subscribers. ``user_is_banned`` Whether the authenticated user is banned. 
``user_is_moderator`` Whether the authenticated user is a moderator. ``user_is_subscriber`` Whether the authenticated user is subscribed. ========================= ========================================================== .. note:: Trying to retrieve attributes of quarantined or private subreddits will result in a 403 error. Trying to retrieve attributes of a banned subreddit will result in a 404 error. .. _unix time: https://en.wikipedia.org/wiki/Unix_time """ # pylint: disable=too-many-public-methods STR_FIELD = "display_name" MESSAGE_PREFIX = "#" @staticmethod def _create_or_update( _reddit, allow_images=None, allow_post_crossposts=None, allow_top=None, collapse_deleted_comments=None, comment_score_hide_mins=None, description=None, domain=None, exclude_banned_modqueue=None, header_hover_text=None, hide_ads=None, lang=None, key_color=None, link_type=None, name=None, over_18=None, public_description=None, public_traffic=None, show_media=None, show_media_preview=None, spam_comments=None, spam_links=None, spam_selfposts=None, spoilers_enabled=None, sr=None, submit_link_label=None, submit_text=None, submit_text_label=None, subreddit_type=None, suggested_comment_sort=None, title=None, wiki_edit_age=None, wiki_edit_karma=None, wikimode=None, **other_settings, ): # pylint: disable=invalid-name,too-many-locals,too-many-arguments model = { "allow_images": allow_images, "allow_post_crossposts": allow_post_crossposts, "allow_top": allow_top, "collapse_deleted_comments": collapse_deleted_comments, "comment_score_hide_mins": comment_score_hide_mins, "description": description, "domain": domain, "exclude_banned_modqueue": exclude_banned_modqueue, "header-title": header_hover_text, # Remap here - better name "hide_ads": hide_ads, "key_color": key_color, "lang": lang, "link_type": link_type, "name": name, "over_18": over_18, "public_description": public_description, "public_traffic": public_traffic, "show_media": show_media, "show_media_preview": show_media_preview, "spam_comments": spam_comments, "spam_links": spam_links, "spam_selfposts": spam_selfposts, "spoilers_enabled": spoilers_enabled, "sr": sr, "submit_link_label": submit_link_label, "submit_text": submit_text, "submit_text_label": submit_text_label, "suggested_comment_sort": suggested_comment_sort, "title": title, "type": subreddit_type, "wiki_edit_age": wiki_edit_age, "wiki_edit_karma": wiki_edit_karma, "wikimode": wikimode, } model.update(other_settings) _reddit.post(API_PATH["site_admin"], data=model) @staticmethod def _subreddit_list(subreddit, other_subreddits): if other_subreddits: return ",".join([str(subreddit)] + [str(x) for x in other_subreddits]) return str(subreddit) @staticmethod def _validate_gallery(images): for image in images: image_path = image.get("image_path", "") if image_path: if not isfile(image_path): raise TypeError(f"{image_path!r} is not a valid image path.") else: raise TypeError("'image_path' is required.") if not len(image.get("caption", "")) <= 180: raise TypeError("Caption must be 180 characters or less.") @staticmethod def _validate_inline_media(inline_media: "praw.models.InlineMedia"): if not isfile(inline_media.path): raise ValueError(f"{inline_media.path!r} is not a valid file path.") @property def _kind(self) -> str: """Return the class's kind.""" return self._reddit.config.kinds["subreddit"] @cachedproperty def banned(self) -> "praw.models.reddit.subreddit.SubredditRelationship": """Provide an instance of :class:`.SubredditRelationship`. For example, to ban a user try: .. 
code-block:: python reddit.subreddit("SUBREDDIT").banned.add("NAME", ban_reason="...") To list the banned users along with any notes, try: .. code-block:: python for ban in reddit.subreddit("SUBREDDIT").banned(): print(f"{ban}: {ban.note}") """ return SubredditRelationship(self, "banned") @cachedproperty def collections(self) -> "praw.models.reddit.collections.SubredditCollections": r"""Provide an instance of :class:`.SubredditCollections`. To see the permalinks of all :class:`.Collection`\ s that belong to a subreddit, try: .. code-block:: python for collection in reddit.subreddit("SUBREDDIT").collections: print(collection.permalink) To get a specific :class:`.Collection` by its UUID or permalink, use one of the following: .. code-block:: python collection = reddit.subreddit("SUBREDDIT").collections("some_uuid") collection = reddit.subreddit("SUBREDDIT").collections( permalink="https://reddit.com/r/SUBREDDIT/collection/some_uuid" ) """ return self._subreddit_collections_class(self._reddit, self) @cachedproperty def contributor(self) -> "praw.models.reddit.subreddit.ContributorRelationship": """Provide an instance of :class:`.ContributorRelationship`. Contributors are also known as approved submitters. To add a contributor try: .. code-block:: python reddit.subreddit("SUBREDDIT").contributor.add("NAME") """ return ContributorRelationship(self, "contributor") @cachedproperty def emoji(self) -> SubredditEmoji: """Provide an instance of :class:`.SubredditEmoji`. This attribute can be used to discover all emoji for a subreddit: .. code-block:: python for emoji in reddit.subreddit("iama").emoji: print(emoji) A single emoji can be lazily retrieved via: .. code-block:: python reddit.subreddit("blah").emoji["emoji_name"] .. note:: Attempting to access attributes of a nonexistent emoji will result in a :class:`.ClientException`. """ return SubredditEmoji(self) @cachedproperty def filters(self) -> "praw.models.reddit.subreddit.SubredditFilters": """Provide an instance of :class:`.SubredditFilters`. For example, to add a filter, run: .. code-block:: python reddit.subreddit("all").filters.add("subreddit_name") """ return SubredditFilters(self) @cachedproperty def flair(self) -> "praw.models.reddit.subreddit.SubredditFlair": """Provide an instance of :class:`.SubredditFlair`. Use this attribute for interacting with a subreddit's flair. For example, to list all the flair for a subreddit which you have the ``flair`` moderator permission on try: .. code-block:: python for flair in reddit.subreddit("NAME").flair(): print(flair) Flair templates can be interacted with through this attribute via: .. code-block:: python for template in reddit.subreddit("NAME").flair.templates: print(template) """ return SubredditFlair(self) @cachedproperty def mod(self) -> "praw.models.reddit.subreddit.SubredditModeration": """Provide an instance of :class:`.SubredditModeration`. For example, to accept a moderation invite from subreddit ``r/test``: .. code-block:: python reddit.subreddit("test").mod.accept_invite() """ return SubredditModeration(self) @cachedproperty def moderator(self) -> "praw.models.reddit.subreddit.ModeratorRelationship": """Provide an instance of :class:`.ModeratorRelationship`. For example, to add a moderator try: .. code-block:: python reddit.subreddit("SUBREDDIT").moderator.add("NAME") To list the moderators along with their permissions try: ..
code-block:: python for moderator in reddit.subreddit("SUBREDDIT").moderator(): print(f"{moderator}: {moderator.mod_permissions}") """ return ModeratorRelationship(self, "moderator") @cachedproperty def modmail(self) -> "praw.models.reddit.subreddit.Modmail": """Provide an instance of :class:`.Modmail`. For example, to send a new modmail from the subreddit ``r/test`` to user ``u/spez`` with the subject ``test`` along with a message body of ``hello``: .. code-block:: python reddit.subreddit("test").modmail.create("test", "hello", "spez") """ return Modmail(self) @cachedproperty def muted(self) -> "praw.models.reddit.subreddit.SubredditRelationship": """Provide an instance of :class:`.SubredditRelationship`. For example, muted users can be iterated through like so: .. code-block:: python for mute in reddit.subreddit("redditdev").muted(): print(f"{mute}: {mute.note}") """ return SubredditRelationship(self, "muted") @cachedproperty def quaran(self) -> "praw.models.reddit.subreddit.SubredditQuarantine": """Provide an instance of :class:`.SubredditQuarantine`. This property is named ``quaran`` because ``quarantine`` is a Subreddit attribute returned by Reddit to indicate whether or not a Subreddit is quarantined. To opt in to a quarantined subreddit: .. code-block:: python reddit.subreddit("test").quaran.opt_in() """ return SubredditQuarantine(self) @cachedproperty def rules(self) -> SubredditRules: """Provide an instance of :class:`.SubredditRules`. Use this attribute for interacting with a subreddit's rules. For example, to list all the rules for a subreddit: .. code-block:: python for rule in reddit.subreddit("AskReddit").rules: print(rule) Moderators can also add rules to the subreddit. For example, to make a rule called ``"No spam"`` in the subreddit ``"NAME"``: .. code-block:: python reddit.subreddit("NAME").rules.mod.add( short_name="No spam", kind="all", description="Do not spam. Spam bad" ) """ return SubredditRules(self) @cachedproperty def stream(self) -> "praw.models.reddit.subreddit.SubredditStream": """Provide an instance of :class:`.SubredditStream`. Streams can be used to indefinitely retrieve new comments made to a subreddit, like: .. code-block:: python for comment in reddit.subreddit("iama").stream.comments(): print(comment) Additionally, new submissions can be retrieved via the stream. In the following example all submissions are fetched via the special subreddit ``r/all``: .. code-block:: python for submission in reddit.subreddit("all").stream.submissions(): print(submission) """ return SubredditStream(self) @cachedproperty def stylesheet(self) -> "praw.models.reddit.subreddit.SubredditStylesheet": """Provide an instance of :class:`.SubredditStylesheet`. For example, to add the css data ``.test{color:blue}`` to the existing stylesheet: .. code-block:: python subreddit = reddit.subreddit("SUBREDDIT") stylesheet = subreddit.stylesheet() stylesheet.stylesheet += ".test{color:blue}" subreddit.stylesheet.update(stylesheet.stylesheet) """ return SubredditStylesheet(self) @cachedproperty def widgets(self) -> "praw.models.SubredditWidgets": """Provide an instance of :class:`.SubredditWidgets`. **Example usage** Get all sidebar widgets: .. code-block:: python for widget in reddit.subreddit("redditdev").widgets.sidebar: print(widget) Get ID card widget: ..
code-block:: python print(reddit.subreddit("redditdev").widgets.id_card) """ return SubredditWidgets(self) @cachedproperty def wiki(self) -> "praw.models.reddit.subreddit.SubredditWiki": """Provide an instance of :class:`.SubredditWiki`. This attribute can be used to discover all wikipages for a subreddit: .. code-block:: python for wikipage in reddit.subreddit("iama").wiki: print(wikipage) To fetch the content for a given wikipage try: .. code-block:: python wikipage = reddit.subreddit("iama").wiki["proof"] print(wikipage.content_md) """ return SubredditWiki(self) def __init__( self, reddit: "praw.Reddit", display_name: Optional[str] = None, _data: Optional[Dict[str, Any]] = None, ): """Initialize a Subreddit instance. :param reddit: An instance of :class:`~.Reddit`. :param display_name: The name of the subreddit. .. note:: This class should not be initialized directly. Instead obtain an instance via: ``reddit.subreddit("subreddit_name")`` """ if (display_name, _data).count(None) != 1: raise TypeError("Either `display_name` or `_data` must be provided.") if display_name: self.display_name = display_name super().__init__(reddit, _data=_data) self._path = API_PATH["subreddit"].format(subreddit=self) def _convert_to_fancypants(self, markdown_text: str) -> dict: """Convert a Markdown string to a dict for use with the ``richtext_json`` param. :param markdown_text: A Markdown string to convert. :returns: A dict in ``richtext_json`` format. """ text_data = {"output_mode": "rtjson", "markdown_text": markdown_text} return self._reddit.post(API_PATH["convert_rte_body"], text_data)["output"] def _fetch_info(self): return "subreddit_about", {"subreddit": self}, None def _fetch_data(self) -> dict: name, fields, params = self._fetch_info() path = API_PATH[name].format(**fields) return self._reddit.request("GET", path, params) def _fetch(self): data = self._fetch_data() data = data["data"] other = type(self)(self._reddit, _data=data) self.__dict__.update(other.__dict__) self._fetched = True def _parse_xml_response(self, response: Response): """Parse the XML from a response and raise any errors found.""" xml = response.text root = XML(xml) tags = [element.tag for element in root] if tags[:4] == ["Code", "Message", "ProposedSize", "MaxSizeAllowed"]: # Returned if image is too big code, message, actual, maximum_size = [element.text for element in root[:4]] raise TooLargeMediaException(int(maximum_size), int(actual)) def _submit_media(self, data: dict, timeout: int, websocket_url: str = None): """Submit and return an `image`, `video`, or `videogif`. This is a helper method for submitting posts that are not link posts or self posts. """ connection = None if websocket_url is not None: try: connection = websocket.create_connection(websocket_url, timeout=timeout) except ( websocket.WebSocketException, socket.error, BlockingIOError, ) as ws_exception: raise WebSocketException( "Error establishing websocket connection.", ws_exception ) self._reddit.post(API_PATH["submit"], data=data) if connection is None: return try: ws_update = loads(connection.recv()) connection.close() except ( websocket.WebSocketException, socket.error, BlockingIOError, ) as ws_exception: raise WebSocketException( "Websocket error. Check your media file. 
Your post may still have been" " created.", ws_exception, ) if ws_update.get("type") == "failed": raise MediaPostFailed url = ws_update["payload"]["redirect"] return self._reddit.submission(url=url) def _upload_media( self, media_path: str, expected_mime_prefix: Optional[str] = None, upload_type: str = "link", ): """Upload media and return its URL and a websocket (Undocumented endpoint). :param expected_mime_prefix: If provided, enforce that the media has a mime type that starts with the provided prefix. :param upload_type: One of ``link``, ``gallery``, or ``selfpost``. (default: ``link``) :returns: A tuple containing ``(media_url, websocket_url)`` for the piece of media. The websocket URL can be used to determine when media processing is finished, or it can be ignored. """ if media_path is None: media_path = join( dirname(dirname(dirname(__file__))), "images", "PRAW logo.png" ) file_name = basename(media_path).lower() file_extension = file_name.rpartition(".")[2] mime_type = { "png": "image/png", "mov": "video/quicktime", "mp4": "video/mp4", "jpg": "image/jpeg", "jpeg": "image/jpeg", "gif": "image/gif", }.get( file_extension, "image/jpeg" ) # default to JPEG if ( expected_mime_prefix is not None and mime_type.partition("/")[0] != expected_mime_prefix ): raise ClientException( f"Expected a mimetype starting with {expected_mime_prefix!r} but got" f" mimetype {mime_type!r} (from file extension {file_extension!r})." ) img_data = {"filepath": file_name, "mimetype": mime_type} url = API_PATH["media_asset"] # until we learn otherwise, assume this request always succeeds upload_response = self._reddit.post(url, data=img_data) upload_lease = upload_response["args"] upload_url = f"https:{upload_lease['action']}" upload_data = {item["name"]: item["value"] for item in upload_lease["fields"]} with open(media_path, "rb") as media: response = self._reddit._core._requestor._http.post( upload_url, data=upload_data, files={"file": media} ) if not response.ok: self._parse_xml_response(response) response.raise_for_status() websocket_url = upload_response["asset"]["websocket_url"] if upload_type == "link": return f"{upload_url}/{upload_data['key']}", websocket_url else: return upload_response["asset"]["asset_id"], websocket_url def _upload_inline_media(self, inline_media: "praw.models.InlineMedia"): """Upload media for use in self posts and return ``inline_media``. :param inline_media: An :class:`.InlineMedia` object to validate and upload. """ self._validate_inline_media(inline_media) inline_media.media_id = self._upload_media( inline_media.path, upload_type="selfpost" )[0] return inline_media def post_requirements(self) -> Dict[str, Union[str, int, bool]]: """Get the post requirements for a subreddit. :returns: A dict with the various requirements. The returned dict contains the following keys: - ``domain_blacklist`` - ``body_restriction_policy`` - ``domain_whitelist`` - ``title_regexes`` - ``body_blacklisted_strings`` - ``body_required_strings`` - ``title_text_min_length`` - ``is_flair_required`` - ``title_text_max_length`` - ``body_regexes`` - ``link_repost_age`` - ``body_text_min_length`` - ``link_restriction_policy`` - ``body_text_max_length`` - ``title_required_strings`` - ``title_blacklisted_strings`` - ``guidelines_text`` - ``guidelines_display_policy`` For example, to fetch the post requirements for ``r/test``: ..
code-block:: python print(reddit.subreddit("test").post_requirements()) """ return self._reddit.get( API_PATH["post_requirements"].format(subreddit=str(self)) ) def random(self) -> Union["praw.models.Submission", None]: """Return a random Submission. Returns ``None`` on subreddits that do not support the random feature. One example, at the time of writing, is ``r/wallpapers``. For example, to get a random submission off of ``r/AskReddit``: .. code-block:: python submission = reddit.subreddit("AskReddit").random() print(submission.title) """ url = API_PATH["subreddit_random"].format(subreddit=self) try: self._reddit.get(url, params={"unique": self._reddit._next_unique}) except Redirect as redirect: path = redirect.path try: return self._submission_class( self._reddit, url=urljoin(self._reddit.config.reddit_url, path) ) except ClientException: return None def search( self, query: str, sort: str = "relevance", syntax: str = "lucene", time_filter: str = "all", **generator_kwargs: Any, ) -> Iterator["praw.models.Submission"]: """Return a :class:`.ListingGenerator` for items that match ``query``. :param query: The query string to search for. :param sort: Can be one of: relevance, hot, top, new, comments. (default: relevance). :param syntax: Can be one of: cloudsearch, lucene, plain (default: lucene). :param time_filter: Can be one of: all, day, hour, month, week, year (default: all). For more information on building a search query see: https://www.reddit.com/wiki/search For example, to search all subreddits for ``praw`` try: .. code-block:: python for submission in reddit.subreddit("all").search("praw"): print(submission.title) """ self._validate_time_filter(time_filter) not_all = self.display_name.lower() != "all" self._safely_add_arguments( generator_kwargs, "params", q=query, restrict_sr=not_all, sort=sort, syntax=syntax, t=time_filter, ) url = API_PATH["search"].format(subreddit=self) return ListingGenerator(self._reddit, url, **generator_kwargs) def sticky(self, number: int = 1) -> "praw.models.Submission": """Return a Submission object for a sticky of the subreddit. :param number: Specify which sticky to return. 1 appears at the top (default: 1). :raises: ``prawcore.NotFound`` if the sticky does not exist. For example, to get the stickied post on the subreddit ``r/test``: .. code-block:: python reddit.subreddit("test").sticky() """ url = API_PATH["about_sticky"].format(subreddit=self) try: self._reddit.get(url, params={"num": number}) except Redirect as redirect: path = redirect.path return self._submission_class( self._reddit, url=urljoin(self._reddit.config.reddit_url, path) ) def submit( self, title: str, selftext: Optional[str] = None, url: Optional[str] = None, flair_id: Optional[str] = None, flair_text: Optional[str] = None, resubmit: bool = True, send_replies: bool = True, nsfw: bool = False, spoiler: bool = False, collection_id: Optional[str] = None, discussion_type: Optional[str] = None, inline_media: Optional[Dict[str, "praw.models.InlineMedia"]] = None, ) -> "praw.models.Submission": # noqa: D301 r"""Add a submission to the subreddit. :param title: The title of the submission. :param selftext: The Markdown formatted content for a ``text`` submission. Use an empty string, ``""``, to make a title-only submission. :param url: The URL for a ``link`` submission. :param collection_id: The UUID of a :class:`.Collection` to add the newly-submitted post to. :param flair_id: The flair template to select (default: None).
:param flair_text: If the template's ``flair_text_editable`` value is True, this value will set a custom text (default: None). ``flair_id`` is required when ``flair_text`` is provided. :param resubmit: When False, an error will occur if the URL has already been submitted (default: True). :param send_replies: When True, messages will be sent to the submission author when comments are made to the submission (default: True). :param nsfw: Whether or not the submission should be marked NSFW (default: False). :param spoiler: Whether or not the submission should be marked as a spoiler (default: False). :param discussion_type: Set to ``CHAT`` to enable live discussion instead of traditional comments (default: None). :param inline_media: A dict of :class:`.InlineMedia` objects where the key is the placeholder name in ``selftext``. :returns: A :class:`~.Submission` object for the newly created submission. Either ``selftext`` or ``url`` can be provided, but not both. For example, to submit a URL to ``r/reddit_api_test`` do: .. code-block:: python title = "PRAW documentation" url = "https://praw.readthedocs.io" reddit.subreddit("reddit_api_test").submit(title, url=url) For example, to submit a self post with inline media do: .. code-block:: python from praw.models import InlineGif, InlineImage, InlineVideo gif = InlineGif("path/to/image.gif", "optional caption") image = InlineImage("path/to/image.jpg", "optional caption") video = InlineVideo("path/to/video.mp4", "optional caption") selftext = "Text with a gif {gif1} an image {image1} and a video {video1} inline" media = {"gif1": gif, "image1": image, "video1": video} reddit.subreddit("redditdev").submit("title", selftext=selftext, inline_media=media) .. note:: Inserted media will have a padding of ``\\n\\n`` automatically added. This works around a quirk in Reddit's API. Using the example above, the resulting selftext body will look like this: .. code-block:: Text with a gif ![gif](u1rchuphryq51 "optional caption") an image ![img](srnr8tshryq51 "optional caption") and video ![video](gmc7rvthryq51 "optional caption") inline .. seealso:: - :meth:`.submit_image` to submit images - :meth:`.submit_video` to submit videos and videogifs - :meth:`.submit_poll` to submit polls - :meth:`.submit_gallery`
to submit more than one image in the same post """ if (bool(selftext) or selftext == "") == bool(url): raise TypeError("Either `selftext` or `url` must be provided.") data = { "sr": str(self), "resubmit": bool(resubmit), "sendreplies": bool(send_replies), "title": title, "nsfw": bool(nsfw), "spoiler": bool(spoiler), "validate_on_submit": self._reddit.validate_on_submit, } for key, value in ( ("flair_id", flair_id), ("flair_text", flair_text), ("collection_id", collection_id), ("discussion_type", discussion_type), ): if value is not None: data[key] = value if selftext is not None: data.update(kind="self") if inline_media: body = selftext.format( **{ placeholder: self._upload_inline_media(media) for placeholder, media in inline_media.items() } ) converted = self._convert_to_fancypants(body) data.update(richtext_json=dumps(converted)) else: data.update(text=selftext) else: data.update(kind="link", url=url) return self._reddit.post(API_PATH["submit"], data=data) def submit_gallery( self, title: str, images: List[Dict[str, str]], *, collection_id: Optional[str] = None, discussion_type: Optional[str] = None, flair_id: Optional[str] = None, flair_text: Optional[str] = None, nsfw: bool = False, send_replies: bool = True, spoiler: bool = False, ): """Add an image gallery submission to the subreddit. :param title: The title of the submission. :param images: The images to post in dict with the following structure: ``{"image_path": "path", "caption": "caption", "outbound_url": "url"}``, only ``"image_path"`` is required. :param collection_id: The UUID of a :class:`.Collection` to add the newly-submitted post to. :param discussion_type: Set to ``CHAT`` to enable live discussion instead of traditional comments (default: None). :param flair_id: The flair template to select (default: None). :param flair_text: If the template's ``flair_text_editable`` value is True, this value will set a custom text (default: None). ``flair_id`` is required when ``flair_text`` is provided. :param nsfw: Whether or not the submission should be marked NSFW (default: False). :param send_replies: When True, messages will be sent to the submission author when comments are made to the submission (default: True). :param spoiler: Whether or not the submission should be marked as a spoiler (default: False). :returns: A :class:`.Submission` object for the newly created submission. :raises: :class:`.ClientException` if ``image_path`` in ``images`` refers to a file that is not an image. For example, to submit an image gallery to ``r/reddit_api_test`` do: .. code-block:: python title = "My favorite pictures" image = "/path/to/image.png" image2 = "/path/to/image2.png" image3 = "/path/to/image3.png" images = [ {"image_path": image}, { "image_path": image2, "caption": "Image caption 2", }, { "image_path": image3, "caption": "Image caption 3", "outbound_url": "https://example.com/link3", }, ] reddit.subreddit("reddit_api_test").submit_gallery(title, images) .. seealso:: - :meth:`.submit` to submit url posts and selftexts - :meth:`.submit_image` to submit single images - :meth:`.submit_poll` to submit polls - :meth:`.submit_video`
to submit videos and videogifs """ self._validate_gallery(images) data = { "api_type": "json", "items": [], "nsfw": bool(nsfw), "sendreplies": bool(send_replies), "show_error_list": True, "spoiler": bool(spoiler), "sr": str(self), "title": title, "validate_on_submit": self._reddit.validate_on_submit, } for key, value in ( ("flair_id", flair_id), ("flair_text", flair_text), ("collection_id", collection_id), ("discussion_type", discussion_type), ): if value is not None: data[key] = value for image in images: data["items"].append( { "caption": image.get("caption", ""), "outbound_url": image.get("outbound_url", ""), "media_id": self._upload_media( image["image_path"], expected_mime_prefix="image", upload_type="gallery", )[0], } ) response = self._reddit.request( "POST", API_PATH["submit_gallery_post"], json=data )["json"] if response["errors"]: raise RedditAPIException(response["errors"]) else: return self._reddit.submission(url=response["data"]["url"]) def submit_image( self, title: str, image_path: str, flair_id: Optional[str] = None, flair_text: Optional[str] = None, resubmit: bool = True, send_replies: bool = True, nsfw: bool = False, spoiler: bool = False, timeout: int = 10, collection_id: Optional[str] = None, without_websockets: bool = False, discussion_type: Optional[str] = None, ): """Add an image submission to the subreddit. :param title: The title of the submission. :param image_path: The path to an image, to upload and post. :param collection_id: The UUID of a :class:`.Collection` to add the newly-submitted post to. :param flair_id: The flair template to select (default: None). :param flair_text: If the template's ``flair_text_editable`` value is True, this value will set a custom text (default: None). ``flair_id`` is required when ``flair_text`` is provided. :param resubmit: When False, an error will occur if the URL has already been submitted (default: True). :param send_replies: When True, messages will be sent to the submission author when comments are made to the submission (default: True). :param nsfw: Whether or not the submission should be marked NSFW (default: False). :param spoiler: Whether or not the submission should be marked as a spoiler (default: False). :param timeout: Specifies a particular timeout, in seconds. Use to avoid "Websocket error" exceptions (default: 10). :param without_websockets: Set to ``True`` to disable use of WebSockets (see note below for an explanation). If ``True``, this method doesn't return anything. (default: ``False``). :param discussion_type: Set to ``CHAT`` to enable live discussion instead of traditional comments (default: None). :returns: A :class:`.Submission` object for the newly created submission, unless ``without_websockets`` is ``True``. :raises: :class:`.ClientException` if ``image_path`` refers to a file that is not an image. .. note:: Reddit's API uses WebSockets to respond with the link of the newly created post. If this fails, the method will raise :class:`.WebSocketException`. Occasionally, the Reddit post will still be created. More often, there is an error with the image file. If you frequently get exceptions but successfully created posts, try setting the ``timeout`` parameter to a value above 10. To disable the use of WebSockets, set ``without_websockets=True``. This will make the method return ``None``, though the post will still be created. You may wish to do this if you are running your program in a restricted network environment, or using a proxy that doesn't support WebSockets connections. 
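As a minimal illustrative sketch (the subreddit name and file path below are placeholders, not part of PRAW), one call raises the ``timeout`` for slow connections, and another skips WebSockets entirely, in which case the method returns ``None`` while the post is still created: .. code-block:: python subreddit = reddit.subreddit("reddit_api_test") subreddit.submit_image("title", "/path/to/image.png", timeout=30) subreddit.submit_image("title", "/path/to/image.png", without_websockets=True)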
For example, to submit an image to ``r/reddit_api_test`` do: .. code-block:: python title = "My favorite picture" image = "/path/to/image.png" reddit.subreddit("reddit_api_test").submit_image(title, image) .. seealso:: - :meth:`.submit` to submit url posts and selftexts - :meth:`.submit_video` to submit videos and videogifs - :meth:`.submit_gallery` to submit more than one image in the same post """ data = { "sr": str(self), "resubmit": bool(resubmit), "sendreplies": bool(send_replies), "title": title, "nsfw": bool(nsfw), "spoiler": bool(spoiler), "validate_on_submit": self._reddit.validate_on_submit, } for key, value in ( ("flair_id", flair_id), ("flair_text", flair_text), ("collection_id", collection_id), ("discussion_type", discussion_type), ): if value is not None: data[key] = value image_url, websocket_url = self._upload_media( image_path, expected_mime_prefix="image" ) data.update(kind="image", url=image_url) if without_websockets: websocket_url = None return self._submit_media( data, timeout, websocket_url=websocket_url, ) def submit_poll( self, title: str, selftext: str, options: List[str], duration: int, flair_id: Optional[str] = None, flair_text: Optional[str] = None, resubmit: bool = True, send_replies: bool = True, nsfw: bool = False, spoiler: bool = False, collection_id: Optional[str] = None, discussion_type: Optional[str] = None, ): """Add a poll submission to the subreddit. :param title: The title of the submission. :param selftext: The Markdown formatted content for the submission. Use an empty string, ``""``, to make a submission with no text contents. :param options: A ``list`` of two to six poll options as ``str``. :param duration: The number of days the poll should accept votes, as an ``int``. Valid values are between ``1`` and ``7``, inclusive. :param collection_id: The UUID of a :class:`.Collection` to add the newly-submitted post to. :param flair_id: The flair template to select (default: None). :param flair_text: If the template's ``flair_text_editable`` value is True, this value will set a custom text (default: None). ``flair_id`` is required when ``flair_text`` is provided. :param resubmit: When False, an error will occur if the URL has already been submitted (default: True). :param send_replies: When True, messages will be sent to the submission author when comments are made to the submission (default: True). :param nsfw: Whether or not the submission should be marked NSFW (default: False). :param spoiler: Whether or not the submission should be marked as a spoiler (default: False). :param discussion_type: Set to ``CHAT`` to enable live discussion instead of traditional comments (default: None). :returns: A :class:`~.Submission` object for the newly created submission. For example, to submit a poll to ``r/reddit_api_test`` do: .. code-block:: python title = "Do you like PRAW?"
reddit.subreddit("reddit_api_test").submit_poll( title, selftext="", options=["Yes", "No"], duration=3 ) """ data = { "sr": str(self), "text": selftext, "options": options, "duration": duration, "resubmit": bool(resubmit), "sendreplies": bool(send_replies), "title": title, "nsfw": bool(nsfw), "spoiler": bool(spoiler), "validate_on_submit": self._reddit.validate_on_submit, } for key, value in ( ("flair_id", flair_id), ("flair_text", flair_text), ("collection_id", collection_id), ("discussion_type", discussion_type), ): if value is not None: data[key] = value return self._reddit.post(API_PATH["submit_poll_post"], json=data) def submit_video( self, title: str, video_path: str, videogif: bool = False, thumbnail_path: Optional[str] = None, flair_id: Optional[str] = None, flair_text: Optional[str] = None, resubmit: bool = True, send_replies: bool = True, nsfw: bool = False, spoiler: bool = False, timeout: int = 10, collection_id: Optional[str] = None, without_websockets: bool = False, discussion_type: Optional[str] = None, ): """Add a video or videogif submission to the subreddit. :param title: The title of the submission. :param video_path: The path to a video, to upload and post. :param videogif: A ``bool`` value. If ``True``, the video is uploaded as a videogif, which is essentially a silent video (default: ``False``). :param thumbnail_path: (Optional) The path to an image, to be uploaded and used as the thumbnail for this video. If not provided, the PRAW logo will be used as the thumbnail. :param collection_id: The UUID of a :class:`.Collection` to add the newly-submitted post to. :param flair_id: The flair template to select (default: ``None``). :param flair_text: If the template's ``flair_text_editable`` value is True, this value will set a custom text (default: ``None``). ``flair_id`` is required when ``flair_text`` is provided. :param resubmit: When False, an error will occur if the URL has already been submitted (default: ``True``). :param send_replies: When True, messages will be sent to the submission author when comments are made to the submission (default: ``True``). :param nsfw: Whether or not the submission should be marked NSFW (default: False). :param spoiler: Whether or not the submission should be marked as a spoiler (default: False). :param timeout: Specifies a particular timeout, in seconds. Use to avoid "Websocket error" exceptions (default: 10). :param without_websockets: Set to ``True`` to disable use of WebSockets (see note below for an explanation). If ``True``, this method doesn't return anything. (default: ``False``). :param discussion_type: Set to ``CHAT`` to enable live discussion instead of traditional comments (default: None). :returns: A :class:`.Submission` object for the newly created submission, unless ``without_websockets`` is ``True``. :raises: :class:`.ClientException` if ``video_path`` refers to a file that is not a video. .. note:: Reddit's API uses WebSockets to respond with the link of the newly created post. If this fails, the method will raise :class:`.WebSocketException`. Occasionally, the Reddit post will still be created. More often, there is an error with the image file. If you frequently get exceptions but successfully created posts, try setting the ``timeout`` parameter to a value above 10. To disable the use of WebSockets, set ``without_websockets=True``. This will make the method return ``None``, though the post will still be created. 
You may wish to do this if you are running your program in a restricted network environment, or using a proxy that doesn't support WebSockets connections. For example, to submit a video to ``r/reddit_api_test`` do: .. code-block:: python title = "My favorite movie" video = "/path/to/video.mp4" reddit.subreddit("reddit_api_test").submit_video(title, video) .. seealso:: - :meth:`.submit` to submit url posts and selftexts - :meth:`.submit_image` to submit images - :meth:`.submit_gallery` to submit more than one image in the same post """ data = { "sr": str(self), "resubmit": bool(resubmit), "sendreplies": bool(send_replies), "title": title, "nsfw": bool(nsfw), "spoiler": bool(spoiler), "validate_on_submit": self._reddit.validate_on_submit, } for key, value in ( ("flair_id", flair_id), ("flair_text", flair_text), ("collection_id", collection_id), ("discussion_type", discussion_type), ): if value is not None: data[key] = value video_url, websocket_url = self._upload_media( video_path, expected_mime_prefix="video" ) data.update( kind="videogif" if videogif else "video", url=video_url, # if thumbnail_path is None, it uploads the PRAW logo video_poster_url=self._upload_media(thumbnail_path)[0], ) if without_websockets: websocket_url = None return self._submit_media( data, timeout, websocket_url=websocket_url, ) def subscribe( self, other_subreddits: Optional[List["praw.models.Subreddit"]] = None ): """Subscribe to the subreddit. :param other_subreddits: When provided, also subscribe to the provided list of subreddits. For example, to subscribe to ``r/test``: .. code-block:: python reddit.subreddit("test").subscribe() """ data = { "action": "sub", "skip_initial_defaults": True, "sr_name": self._subreddit_list(self, other_subreddits), } self._reddit.post(API_PATH["subscribe"], data=data) def traffic(self) -> Dict[str, List[List[int]]]: """Return a dictionary of the subreddit's traffic statistics. :raises: ``prawcore.NotFound`` when the traffic stats aren't available to the authenticated user, that is, they are not public and the authenticated user is not a moderator of the subreddit. The traffic method returns a dict with three keys. The keys are ``day``, ``hour`` and ``month``. Each key contains a list of lists with 3 or 4 values. The first value is a timestamp indicating the start of the category (start of the day for the ``day`` key, start of the hour for the ``hour`` key, etc.). The second, third, and fourth values indicate the unique pageviews, total pageviews, and subscribers, respectively. .. note:: The ``hour`` key does not contain subscribers, and therefore each sub-list contains three values. For example, to get the traffic stats for ``r/test``: .. code-block:: python stats = reddit.subreddit("test").traffic() """ return self._reddit.get(API_PATH["about_traffic"].format(subreddit=self)) def unsubscribe( self, other_subreddits: Optional[List["praw.models.Subreddit"]] = None ): """Unsubscribe from the subreddit. :param other_subreddits: When provided, also unsubscribe from the provided list of subreddits. To unsubscribe from ``r/test``: .. code-block:: python reddit.subreddit("test").unsubscribe() """ data = { "action": "unsub", "sr_name": self._subreddit_list(self, other_subreddits), } self._reddit.post(API_PATH["subscribe"], data=data) WidgetEncoder._subreddit_class = Subreddit class SubredditFilters: """Provide functions to interact with the special Subreddit's filters. Members of this class should be utilized via ``Subreddit.filters``. For example, to add a filter, run: ..
code-block:: python reddit.subreddit("all").filters.add("subreddit_name") """ def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditFilters instance. :param subreddit: The special subreddit whose filters to work with. As of this writing filters can only be used with the special subreddits ``all`` and ``mod``. """ self.subreddit = subreddit def __iter__(self) -> Generator["praw.models.Subreddit", None, None]: """Iterate through the special subreddit's filters. This method should be invoked as: .. code-block:: python for subreddit in reddit.subreddit("NAME").filters: ... """ url = API_PATH["subreddit_filter_list"].format( special=self.subreddit, user=self.subreddit._reddit.user.me() ) params = {"unique": self.subreddit._reddit._next_unique} response_data = self.subreddit._reddit.get(url, params=params) for subreddit in response_data.subreddits: yield subreddit def add(self, subreddit: Union["praw.models.Subreddit", str]): """Add ``subreddit`` to the list of filtered subreddits. :param subreddit: The subreddit to add to the filter list. Items from subreddits added to the filtered list will no longer be included when obtaining listings for ``r/all``. Alternatively, you can filter a subreddit temporarily from a special listing in a manner like so: .. code-block:: python reddit.subreddit("all-redditdev-learnpython") :raises: ``prawcore.NotFound`` when calling on a non-special subreddit. """ url = API_PATH["subreddit_filter"].format( special=self.subreddit, user=self.subreddit._reddit.user.me(), subreddit=subreddit, ) self.subreddit._reddit.put(url, data={"model": dumps({"name": str(subreddit)})}) def remove(self, subreddit: Union["praw.models.Subreddit", str]): """Remove ``subreddit`` from the list of filtered subreddits. :param subreddit: The subreddit to remove from the filter list. :raises: ``prawcore.NotFound`` when calling on a non-special subreddit. """ url = API_PATH["subreddit_filter"].format( special=self.subreddit, user=self.subreddit._reddit.user.me(), subreddit=str(subreddit), ) self.subreddit._reddit.delete(url) class SubredditFlair: """Provide a set of functions to interact with a Subreddit's flair.""" @cachedproperty def link_templates( self, ) -> "praw.models.reddit.subreddit.SubredditLinkFlairTemplates": """Provide an instance of :class:`.SubredditLinkFlairTemplates`. Use this attribute for interacting with a subreddit's link flair templates. For example to list all the link flair templates for a subreddit which you have the ``flair`` moderator permission on try: .. code-block:: python for template in reddit.subreddit("NAME").flair.link_templates: print(template) """ return SubredditLinkFlairTemplates(self.subreddit) @cachedproperty def templates( self, ) -> "praw.models.reddit.subreddit.SubredditRedditorFlairTemplates": """Provide an instance of :class:`.SubredditRedditorFlairTemplates`. Use this attribute for interacting with a subreddit's flair templates. For example to list all the flair templates for a subreddit which you have the ``flair`` moderator permission on try: .. code-block:: python for template in reddit.subreddit("NAME").flair.templates: print(template) """ return SubredditRedditorFlairTemplates(self.subreddit) def __call__( self, redditor: Optional[Union["praw.models.Redditor", str]] = None, **generator_kwargs: Any, ) -> Iterator["praw.models.Redditor"]: """Return a :class:`.ListingGenerator` for Redditors and their flairs. :param redditor: When provided, yield at most a single :class:`~.Redditor` instance (default: None). 
Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. Usage: .. code-block:: python for flair in reddit.subreddit("NAME").flair(limit=None): print(flair) """ Subreddit._safely_add_arguments(generator_kwargs, "params", name=redditor) generator_kwargs.setdefault("limit", None) url = API_PATH["flairlist"].format(subreddit=self.subreddit) return ListingGenerator(self.subreddit._reddit, url, **generator_kwargs) def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditFlair instance. :param subreddit: The subreddit whose flair to work with. """ self.subreddit = subreddit def configure( self, position: str = "right", self_assign: bool = False, link_position: str = "left", link_self_assign: bool = False, **settings: Any, ): """Update the subreddit's flair configuration. :param position: One of left, right, or False to disable (default: right). :param self_assign: (boolean) Permit self assignment of user flair (default: False). :param link_position: One of left, right, or False to disable (default: left). :param link_self_assign: (boolean) Permit self assignment of link flair (default: False). Additional keyword arguments can be provided to handle new settings as Reddit introduces them. """ data = { "flair_enabled": bool(position), "flair_position": position or "right", "flair_self_assign_enabled": self_assign, "link_flair_position": link_position or "", "link_flair_self_assign_enabled": link_self_assign, } data.update(settings) url = API_PATH["flairconfig"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def delete(self, redditor: Union["praw.models.Redditor", str]): """Delete flair for a Redditor. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. .. seealso:: :meth:`~praw.models.reddit.subreddit.SubredditFlair.update` to delete the flair of many Redditors at once. """ url = API_PATH["deleteflair"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data={"name": str(redditor)}) def delete_all(self) -> List[Dict[str, Union[str, bool, Dict[str, str]]]]: """Delete all Redditor flair in the Subreddit. :returns: List of dictionaries indicating the success or failure of each delete. """ return self.update(x["user"] for x in self()) def set( self, redditor: Union["praw.models.Redditor", str], text: str = "", css_class: str = "", flair_template_id: Optional[str] = None, ): """Set flair for a Redditor. :param redditor: (Required) A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. :param text: The flair text to associate with the Redditor or Submission (default: ""). :param css_class: The css class to associate with the flair html (default: ``""``). Use either this or ``flair_template_id``. :param flair_template_id: The ID of the flair template to be used (default: ``None``). Use either this or ``css_class``. This method can only be used by an authenticated user who is a moderator of the associated Subreddit. For example: .. code-block:: python reddit.subreddit("redditdev").flair.set("bboe", "PRAW author", css_class="mods") template = "6bd28436-1aa7-11e9-9902-0e05ab0fad46" reddit.subreddit("redditdev").flair.set( "spez", "Reddit CEO", flair_template_id=template ) """ if css_class and flair_template_id is not None: raise TypeError( "Parameter `css_class` cannot be used in conjunction with" " `flair_template_id`."
) data = {"name": str(redditor), "text": text} if flair_template_id is not None: data["flair_template_id"] = flair_template_id url = API_PATH["select_flair"].format(subreddit=self.subreddit) else: data["css_class"] = css_class url = API_PATH["flair"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def update( self, flair_list: Iterator[ Union[ str, "praw.models.Redditor", Dict[str, Union[str, "praw.models.Redditor"]], ] ], text: str = "", css_class: str = "", ) -> List[Dict[str, Union[str, bool, Dict[str, str]]]]: """Set or clear the flair for many Redditors at once. :param flair_list: Each item in this list should be either: the name of a Redditor, an instance of :class:`.Redditor`, or a dictionary mapping keys ``user``, ``flair_text``, and ``flair_css_class`` to their respective values. The ``user`` key should map to a Redditor, as described above. When a dictionary isn't provided, or the dictionary is missing one of the ``flair_text`` or ``flair_css_class`` attributes, the default values will come from the following arguments. :param text: The flair text to use when not explicitly provided in ``flair_list`` (default: ""). :param css_class: The css class to use when not explicitly provided in ``flair_list`` (default: ""). :returns: List of dictionaries indicating the success or failure of each update. For example, to clear the flair text and set the ``praw`` flair css class on a few users try: .. code-block:: python subreddit.flair.update(["bboe", "spez", "spladug"], css_class="praw") """ templines = StringIO() for item in flair_list: if isinstance(item, dict): writer(templines).writerow( [ str(item["user"]), item.get("flair_text", text), item.get("flair_css_class", css_class), ] ) else: writer(templines).writerow([str(item), text, css_class]) lines = templines.getvalue().splitlines() templines.close() response = [] url = API_PATH["flaircsv"].format(subreddit=self.subreddit) while lines: data = {"flair_csv": "\n".join(lines[:100])} response.extend(self.subreddit._reddit.post(url, data=data)) lines = lines[100:] return response class SubredditFlairTemplates: """Provide functions to interact with a Subreddit's flair templates.""" @staticmethod def flair_type(is_link: bool) -> str: """Return LINK_FLAIR or USER_FLAIR depending on ``is_link`` value.""" return "LINK_FLAIR" if is_link else "USER_FLAIR" def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditFlairTemplate instance. :param subreddit: The subreddit whose flair templates to work with. .. note:: This class should not be initialized directly. Instead obtain an instance via: ``reddit.subreddit("subreddit_name").flair.templates`` or ``reddit.subreddit("subreddit_name").flair.link_templates``.
""" self.subreddit = subreddit def __iter__(self): """Abstract method to return flair templates.""" raise NotImplementedError() def _add( self, text: str, css_class: str = "", text_editable: bool = False, is_link: Optional[bool] = None, background_color: Optional[str] = None, text_color: Optional[str] = None, mod_only: Optional[bool] = None, allowable_content: Optional[str] = None, max_emojis: Optional[int] = None, ): url = API_PATH["flairtemplate_v2"].format(subreddit=self.subreddit) data = { "allowable_content": allowable_content, "background_color": background_color, "css_class": css_class, "flair_type": self.flair_type(is_link), "max_emojis": max_emojis, "mod_only": bool(mod_only), "text": text, "text_color": text_color, "text_editable": bool(text_editable), } self.subreddit._reddit.post(url, data=data) def _clear(self, is_link: Optional[bool] = None): url = API_PATH["flairtemplateclear"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data={"flair_type": self.flair_type(is_link)}) def delete(self, template_id: str): """Remove a flair template provided by ``template_id``. For example, to delete the first Redditor flair template listed, try: .. code-block:: python template_info = list(subreddit.flair.templates)[0] subreddit.flair.templates.delete(template_info["id"]) """ url = API_PATH["flairtemplatedelete"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data={"flair_template_id": template_id}) def update( self, template_id: str, text: Optional[str] = None, css_class: Optional[str] = None, text_editable: Optional[bool] = None, background_color: Optional[str] = None, text_color: Optional[str] = None, mod_only: Optional[bool] = None, allowable_content: Optional[str] = None, max_emojis: Optional[int] = None, fetch: bool = True, ): """Update the flair template provided by ``template_id``. :param template_id: The flair template to update. If not valid then an exception will be thrown. :param text: The flair template's new text (required). :param css_class: The flair template's new css_class (default: ""). :param text_editable: (boolean) Indicate if the flair text can be modified for each Redditor that sets it (default: False). :param background_color: The flair template's new background color, as a hex color. :param text_color: The flair template's new text color, either ``"light"`` or ``"dark"``. :param mod_only: (boolean) Indicate if the flair can only be used by moderators. :param allowable_content: If specified, most be one of ``"all"``, ``"emoji"``, or ``"text"`` to restrict content to that type. If set to ``"emoji"`` then the ``"text"`` param must be a valid emoji string, for example, ``":snoo:"``. :param max_emojis: (int) Maximum emojis in the flair (Reddit defaults this value to 10). :param fetch: Whether or not PRAW will fetch existing information on the existing flair before updating (Default: True). .. warning:: If parameter ``fetch`` is set to ``False``, all parameters not provided will be reset to default (``None`` or ``False``) values. For example, to make a user flair template text_editable, try: .. 
code-block:: python template_info = list(subreddit.flair.templates)[0] subreddit.flair.templates.update( template_info["id"], template_info["flair_text"], text_editable=True ) """ url = API_PATH["flairtemplate_v2"].format(subreddit=self.subreddit) data = { "allowable_content": allowable_content, "background_color": background_color, "css_class": css_class, "flair_template_id": template_id, "max_emojis": max_emojis, "mod_only": mod_only, "text": text, "text_color": text_color, "text_editable": text_editable, } if fetch: _existing_data = [ template for template in iter(self) if template["id"] == template_id ] if len(_existing_data) != 1: raise InvalidFlairTemplateID(template_id) else: existing_data = _existing_data[0] for key, value in existing_data.items(): if data.get(key) is None: data[key] = value self.subreddit._reddit.post(url, data=data) class SubredditRedditorFlairTemplates(SubredditFlairTemplates): """Provide functions to interact with Redditor flair templates.""" def __iter__( self, ) -> Generator[Dict[str, Union[str, int, bool, List[Dict[str, str]]]], None, None]: """Iterate through the user flair templates. For example: .. code-block:: python for template in reddit.subreddit("NAME").flair.templates: print(template) """ url = API_PATH["user_flair"].format(subreddit=self.subreddit) params = {"unique": self.subreddit._reddit._next_unique} for template in self.subreddit._reddit.get(url, params=params): yield template def add( self, text: str, css_class: str = "", text_editable: bool = False, background_color: Optional[str] = None, text_color: Optional[str] = None, mod_only: Optional[bool] = None, allowable_content: Optional[str] = None, max_emojis: Optional[int] = None, ): """Add a Redditor flair template to the associated subreddit. :param text: The flair template's text (required). :param css_class: The flair template's css_class (default: ""). :param text_editable: (boolean) Indicate if the flair text can be modified for each Redditor that sets it (default: False). :param background_color: The flair template's new background color, as a hex color. :param text_color: The flair template's new text color, either ``"light"`` or ``"dark"``. :param mod_only: (boolean) Indicate if the flair can only be used by moderators. :param allowable_content: If specified, must be one of ``"all"``, ``"emoji"``, or ``"text"`` to restrict content to that type. If set to ``"emoji"`` then the ``"text"`` param must be a valid emoji string, for example, ``":snoo:"``. :param max_emojis: (int) Maximum emojis in the flair (Reddit defaults this value to 10). For example, to add an editable Redditor flair try: .. code-block:: python reddit.subreddit("NAME").flair.templates.add("PRAW", css_class="praw", text_editable=True) """ self._add( text, css_class=css_class, text_editable=text_editable, is_link=False, background_color=background_color, text_color=text_color, mod_only=mod_only, allowable_content=allowable_content, max_emojis=max_emojis, ) def clear(self): """Remove all Redditor flair templates from the subreddit. For example: .. code-block:: python reddit.subreddit("NAME").flair.templates.clear() """ self._clear(is_link=False) class SubredditLinkFlairTemplates(SubredditFlairTemplates): """Provide functions to interact with link flair templates.""" def __iter__( self, ) -> Generator[Dict[str, Union[str, int, bool, List[Dict[str, str]]]], None, None]: """Iterate through the link flair templates. For example: ..
code-block:: python for template in reddit.subreddit("NAME").flair.link_templates: print(template) """ url = API_PATH["link_flair"].format(subreddit=self.subreddit) for template in self.subreddit._reddit.get(url): yield template def add( self, text: str, css_class: str = "", text_editable: bool = False, background_color: Optional[str] = None, text_color: Optional[str] = None, mod_only: Optional[bool] = None, allowable_content: Optional[str] = None, max_emojis: Optional[int] = None, ): """Add a link flair template to the associated subreddit. :param text: The flair template's text (required). :param css_class: The flair template's css_class (default: ""). :param text_editable: (boolean) Indicate if the flair text can be modified for each Redditor that sets it (default: False). :param background_color: The flair template's new background color, as a hex color. :param text_color: The flair template's new text color, either ``"light"`` or ``"dark"``. :param mod_only: (boolean) Indicate if the flair can only be used by moderators. :param allowable_content: If specified, must be one of ``"all"``, ``"emoji"``, or ``"text"`` to restrict content to that type. If set to ``"emoji"`` then the ``"text"`` param must be a valid emoji string, for example, ``":snoo:"``. :param max_emojis: (int) Maximum emojis in the flair (Reddit defaults this value to 10). For example, to add an editable link flair try: .. code-block:: python reddit.subreddit("NAME").flair.link_templates.add("PRAW", css_class="praw", text_editable=True) """ self._add( text, css_class=css_class, text_editable=text_editable, is_link=True, background_color=background_color, text_color=text_color, mod_only=mod_only, allowable_content=allowable_content, max_emojis=max_emojis, ) def clear(self): """Remove all link flair templates from the subreddit. For example: .. code-block:: python reddit.subreddit("NAME").flair.link_templates.clear() """ self._clear(is_link=True) class SubredditModeration: """Provides a set of moderation functions to a Subreddit. For example, to accept a moderation invite from subreddit ``r/test``: .. code-block:: python reddit.subreddit("test").mod.accept_invite() """ @staticmethod def _handle_only(only: Optional[str], generator_kwargs: Dict[str, Any]): if only is not None: if only == "submissions": only = "links" RedditBase._safely_add_arguments(generator_kwargs, "params", only=only) def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditModeration instance. :param subreddit: The subreddit to moderate. """ self.subreddit = subreddit self._stream = None def accept_invite(self): """Accept an invitation as a moderator of the community.""" url = API_PATH["accept_mod_invite"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url) def edited( self, only: Optional[str] = None, **generator_kwargs: Any ) -> Iterator[Union["praw.models.Comment", "praw.models.Submission"]]: """Return a :class:`.ListingGenerator` for edited comments and submissions. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print all items in the edited queue try: ..
code-block:: python for item in reddit.subreddit("mod").mod.edited(limit=None): print(item) """ self._handle_only(only, generator_kwargs) return ListingGenerator( self.subreddit._reddit, API_PATH["about_edited"].format(subreddit=self.subreddit), **generator_kwargs, ) def inbox( self, **generator_kwargs: Any ) -> Iterator["praw.models.SubredditMessage"]: """Return a :class:`.ListingGenerator` for moderator messages. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. .. seealso:: :meth:`~.unread` for unread moderator messages. To print the last 5 moderator mail messages and their replies, try: .. code-block:: python for message in reddit.subreddit("mod").mod.inbox(limit=5): print(f"From: {message.author}, Body: {message.body}") for reply in message.replies: print(f"From: {reply.author}, Body: {reply.body}") """ return ListingGenerator( self.subreddit._reddit, API_PATH["moderator_messages"].format(subreddit=self.subreddit), **generator_kwargs, ) def log( self, action: Optional[str] = None, mod: Optional[Union["praw.models.Redditor", str]] = None, **generator_kwargs: Any, ) -> Iterator["praw.models.ModAction"]: """Return a :class:`.ListingGenerator` for moderator log entries. :param action: If given, only return log entries for the specified action. :param mod: If given, only return log entries for actions made by the passed in Redditor. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print the moderator and subreddit of the last 5 modlog entries try: .. code-block:: python for log in reddit.subreddit("mod").mod.log(limit=5): print(f"Mod: {log.mod}, Subreddit: {log.subreddit}") """ params = {"mod": str(mod) if mod else mod, "type": action} Subreddit._safely_add_arguments(generator_kwargs, "params", **params) return ListingGenerator( self.subreddit._reddit, API_PATH["about_log"].format(subreddit=self.subreddit), **generator_kwargs, ) def modqueue( self, only: Optional[str] = None, **generator_kwargs: Any ) -> Iterator[Union["praw.models.Submission", "praw.models.Comment"]]: """Return a :class:`.ListingGenerator` for modqueue items. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print all modqueue items try: .. code-block:: python for item in reddit.subreddit("mod").mod.modqueue(limit=None): print(item) """ self._handle_only(only, generator_kwargs) return ListingGenerator( self.subreddit._reddit, API_PATH["about_modqueue"].format(subreddit=self.subreddit), **generator_kwargs, ) @cachedproperty def stream(self) -> "praw.models.reddit.subreddit.SubredditModerationStream": """Provide an instance of :class:`.SubredditModerationStream`. Streams can be used to indefinitely retrieve Moderator only items from :class:`.SubredditModeration` made to moderated subreddits, like: .. code-block:: python for log in reddit.subreddit("mod").mod.stream.log(): print(f"Mod: {log.mod}, Subreddit: {log.subreddit}") """ return SubredditModerationStream(self.subreddit) @cachedproperty def removal_reasons(self) -> SubredditRemovalReasons: """Provide an instance of :class:`.SubredditRemovalReasons`. Use this attribute for interacting with a subreddit's removal reasons. For example to list all the removal reasons for a subreddit which you have the ``posts`` moderator permission on, try: .. 
code-block:: python for removal_reason in reddit.subreddit("NAME").mod.removal_reasons: print(removal_reason) A single removal reason can be lazily retrieved via: .. code-block:: python reddit.subreddit("NAME").mod.removal_reasons["reason_id"] .. note:: Attempting to access attributes of a nonexistent removal reason will result in a :class:`.ClientException`. """ return SubredditRemovalReasons(self.subreddit) def reports( self, only: Optional[str] = None, **generator_kwargs: Any ) -> Iterator[Union["praw.models.Submission", "praw.models.Comment"]]: """Return a :class:`.ListingGenerator` for reported comments and submissions. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print the user and mod report reasons in the report queue try: .. code-block:: python for reported_item in reddit.subreddit("mod").mod.reports(): print(f"User Reports: {reported_item.user_reports}") print(f"Mod Reports: {reported_item.mod_reports}") """ self._handle_only(only, generator_kwargs) return ListingGenerator( self.subreddit._reddit, API_PATH["about_reports"].format(subreddit=self.subreddit), **generator_kwargs, ) def settings(self) -> Dict[str, Union[str, int, bool]]: """Return a dictionary of the subreddit's current settings.""" url = API_PATH["subreddit_settings"].format(subreddit=self.subreddit) return self.subreddit._reddit.get(url)["data"] def spam( self, only: Optional[str] = None, **generator_kwargs: Any ) -> Iterator[Union["praw.models.Submission", "praw.models.Comment"]]: """Return a :class:`.ListingGenerator` for spam comments and submissions. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print the items in the spam queue try: .. code-block:: python for item in reddit.subreddit("mod").mod.spam(): print(item) """ self._handle_only(only, generator_kwargs) return ListingGenerator( self.subreddit._reddit, API_PATH["about_spam"].format(subreddit=self.subreddit), **generator_kwargs, ) def unmoderated( self, **generator_kwargs: Any ) -> Iterator["praw.models.Submission"]: """Return a :class:`.ListingGenerator` for unmoderated submissions. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print the items in the unmoderated queue try: .. code-block:: python for item in reddit.subreddit("mod").mod.unmoderated(): print(item) """ return ListingGenerator( self.subreddit._reddit, API_PATH["about_unmoderated"].format(subreddit=self.subreddit), **generator_kwargs, ) def unread( self, **generator_kwargs: Any ) -> Iterator["praw.models.SubredditMessage"]: """Return a :class:`.ListingGenerator` for unread moderator messages. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. .. seealso:: :meth:`inbox` for all messages. To print the mail in the unread modmail queue try: .. code-block:: python for message in reddit.subreddit("mod").mod.unread(): print(f"From: {message.author}, To: {message.dest}") """ return ListingGenerator( self.subreddit._reddit, API_PATH["moderator_unread"].format(subreddit=self.subreddit), **generator_kwargs, ) def update( self, **settings: Union[str, int, bool] ) -> Dict[str, Union[str, int, bool]]: """Update the subreddit's settings. See https://www.reddit.com/dev/api#POST_api_site_admin for the full list.
:param all_original_content: Mandate all submissions to be original content only. :param allow_chat_post_creation: Allow users to create chat submissions. :param allow_images: Allow users to upload images using the native image hosting. :param allow_polls: Allow users to post polls to the subreddit. :param allow_post_crossposts: Allow users to crosspost submissions from other subreddits. :param allow_videos: Allow users to upload videos using the native image hosting. :param collapse_deleted_comments: Collapse deleted and removed comments on comments pages by default. :param comment_score_hide_mins: The number of minutes to hide comment scores. :param content_options: The types of submissions users can make. One of ``any``, ``link``, ``self``. :param crowd_control_chat_level: Controls the crowd control level for chat rooms. Goes from 0-3. :param crowd_control_level: Controls the crowd control level for submissions. Goes from 0-3. :param crowd_control_mode: Enables/disables crowd control. :param default_set: Allow the subreddit to appear on ``r/all`` as well as the default and trending lists. :param disable_contributor_requests: Specifies whether redditors may send automated modmail messages requesting approval as a submitter. :param exclude_banned_modqueue: Exclude posts by site-wide banned users from modqueue/unmoderated. :param free_form_reports: Allow users to specify custom reasons in the report menu. :param header_hover_text: The text seen when hovering over the snoo. :param hide_ads: Don't show ads within this subreddit. Only applies to Premium-user only subreddits. :param key_color: A 6-digit rgb hex color (e.g. ``"#AABBCC"``), used as a thematic color for your subreddit on mobile. :param language: A valid IETF language tag (underscore separated). :param original_content_tag_enabled: Enables the use of the ``original content`` label for submissions. :param over_18: Viewers must be over 18 years old (i.e. NSFW). :param public_description: Public description blurb. Appears in search results and on the landing page for private subreddits. :param restrict_commenting: Specifies whether approved users have the ability to comment. :param restrict_posting: Specifies whether approved users have the ability to submit posts. :param show_media: Show thumbnails on submissions. :param show_media_preview: Expand media previews on comments pages. :param spam_comments: Spam filter strength for comments. One of ``all``, ``low``, ``high``. :param spam_links: Spam filter strength for links. One of ``all``, ``low``, ``high``. :param spam_selfposts: Spam filter strength for selfposts. One of ``all``, ``low``, ``high``. :param spoilers_enabled: Enable marking posts as containing spoilers. :param submit_link_label: Custom label for submit link button (None for default). :param submit_text: Text to show on submission page. :param submit_text_label: Custom label for submit text post button (None for default). :param subreddit_type: One of ``archived``, ``employees_only``, ``gold_only``, ``gold_restricted``, ``private``, ``public``, ``restricted``. :param suggested_comment_sort: All comment threads will use this sorting method by default. Leave None, or choose one of ``confidence``, ``controversial``, ``live``, ``new``, ``old``, ``qa``, ``random``, ``top``. :param title: The title of the subreddit. :param welcome_message_enabled: Enables the subreddit welcome message. :param welcome_message_text: The text to be used as a welcome message. A welcome message is sent to all new subscribers by a Reddit bot. 
:param wiki_edit_age: Account age, in days, required to edit and create wiki pages. :param wiki_edit_karma: Subreddit karma required to edit and create wiki pages. :param wikimode: One of ``anyone``, ``disabled``, ``modonly``. .. note:: Updating the subreddit sidebar on old reddit (``description``) is no longer supported using this method. You can update the sidebar by editing the ``"config/sidebar"`` wiki page. For example: .. code-block:: python sidebar = reddit.subreddit("test").wiki["config/sidebar"] sidebar.edit(content="new sidebar content") Additional keyword arguments can be provided to handle new settings as Reddit introduces them. Settings that are documented here and aren't explicitly set by you in a call to :meth:`.SubredditModeration.update` should retain their current value. If they do not, please file a bug. """ # These attributes come out using different names than they go in. remap = { "content_options": "link_type", "default_set": "allow_top", "header_hover_text": "header_title", "language": "lang", "subreddit_type": "type", } settings = {remap.get(key, key): value for key, value in settings.items()} settings["sr"] = self.subreddit.fullname return self.subreddit._reddit.patch(API_PATH["update_settings"], json=settings) class SubredditModerationStream: """Provides moderator streams.""" def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditModerationStream instance. :param subreddit: The moderated subreddit associated with the streams. """ self.subreddit = subreddit def edited( self, only: Optional[str] = None, **stream_options: Any ) -> Generator[Union["praw.models.Comment", "praw.models.Submission"], None, None]: """Yield edited comments and submissions as they become available. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Keyword arguments are passed to :func:`.stream_generator`. For example, to retrieve all new edited submissions/comments made to all moderated subreddits, try: .. code-block:: python for item in reddit.subreddit("mod").mod.stream.edited(): print(item) """ return stream_generator(self.subreddit.mod.edited, only=only, **stream_options) def log( self, action: Optional[str] = None, mod: Optional[Union[str, "praw.models.Redditor"]] = None, **stream_options: Any, ) -> Generator["praw.models.ModAction", None, None]: """Yield moderator log entries as they become available. :param action: If given, only return log entries for the specified action. :param mod: If given, only return log entries for actions made by the passed in Redditor. For example, to retrieve all new mod actions made to all moderated subreddits, try: .. code-block:: python for log in reddit.subreddit("mod").mod.stream.log(): print(f"Mod: {log.mod}, Subreddit: {log.subreddit}") """ return stream_generator( self.subreddit.mod.log, action=action, mod=mod, attribute_name="id", **stream_options, ) def modmail_conversations( self, other_subreddits: Optional[List["praw.models.Subreddit"]] = None, sort: Optional[str] = None, state: Optional[str] = None, **stream_options: Any, ) -> Generator[ModmailConversation, None, None]: """Yield new-modmail conversations as they become available. :param other_subreddits: A list of :class:`.Subreddit` instances for which to fetch conversations (default: None). :param sort: Can be one of: mod, recent, unread, user (default: recent). :param state: Can be one of: all, appeals, archived, default, highlighted, inbox, inprogress, mod, new, notifications (default: all).
"all" does not include mod or archived conversations. "inbox" does not include appeals conversations. Keyword arguments are passed to :func:`.stream_generator`. To print new mail in the unread modmail queue try: .. code-block:: python subreddit = reddit.subreddit("all") for message in subreddit.mod.stream.modmail_conversations(): print(f"From: {message.owner}, To: {message.participant}") """ # noqa: E501 if self.subreddit == "mod": self.subreddit = self.subreddit._reddit.subreddit("all") return stream_generator( self.subreddit.modmail.conversations, other_subreddits=other_subreddits, sort=sort, state=state, attribute_name="id", exclude_before=True, **stream_options, ) def modqueue( self, only: Optional[str] = None, **stream_options: Any ) -> Generator[Union["praw.models.Comment", "praw.models.Submission"], None, None]: """Yield comments/submissions in the modqueue as they become available. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Keyword arguments are passed to :func:`.stream_generator`. To print all new modqueue items try: .. code-block:: python for item in reddit.subreddit("mod").mod.stream.modqueue(): print(item) """ return stream_generator( self.subreddit.mod.modqueue, only=only, **stream_options ) def reports( self, only: Optional[str] = None, **stream_options: Any ) -> Generator[Union["praw.models.Comment", "praw.models.Submission"], None, None]: """Yield reported comments and submissions as they become available. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Keyword arguments are passed to :func:`.stream_generator`. To print new user and mod report reasons in the report queue try: .. code-block:: python for item in reddit.subreddit("mod").mod.stream.reports(): print(item) """ return stream_generator(self.subreddit.mod.reports, only=only, **stream_options) def spam( self, only: Optional[str] = None, **stream_options: Any ) -> Generator[Union["praw.models.Comment", "praw.models.Submission"], None, None]: """Yield spam comments and submissions as they become available. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Keyword arguments are passed to :func:`.stream_generator`. To print new items in the spam queue try: .. code-block:: python for item in reddit.subreddit("mod").mod.stream.spam(): print(item) """ return stream_generator(self.subreddit.mod.spam, only=only, **stream_options) def unmoderated( self, **stream_options: Any ) -> Generator["praw.models.Submission", None, None]: """Yield unmoderated submissions as they become available. Keyword arguments are passed to :func:`.stream_generator`. To print new items in the unmoderated queue try: .. code-block:: python for item in reddit.subreddit("mod").mod.stream.unmoderated(): print(item) """ return stream_generator(self.subreddit.mod.unmoderated, **stream_options) def unread( self, **stream_options: Any ) -> Generator["praw.models.SubredditMessage", None, None]: """Yield unread old modmail messages as they become available. Keyword arguments are passed to :func:`.stream_generator`. .. seealso:: :meth:`~.inbox` for all messages. To print new mail in the unread modmail queue try: .. 
code-block:: python for message in reddit.subreddit("mod").mod.stream.unread(): print(f"From: {message.author}, To: {message.dest}") """ return stream_generator(self.subreddit.mod.unread, **stream_options) class SubredditQuarantine: """Provides subreddit quarantine related methods. To opt in to a quarantined subreddit: .. code-block:: python reddit.subreddit("test").quaran.opt_in() """ def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditQuarantine instance. :param subreddit: The subreddit associated with the quarantine. """ self.subreddit = subreddit def opt_in(self): """Permit your user access to the quarantined subreddit. Usage: .. code-block:: python subreddit = reddit.subreddit("QUESTIONABLE") next(subreddit.hot()) # Raises prawcore.Forbidden subreddit.quaran.opt_in() next(subreddit.hot()) # Returns Submission """ data = {"sr_name": self.subreddit} try: self.subreddit._reddit.post(API_PATH["quarantine_opt_in"], data=data) except Redirect: pass def opt_out(self): """Remove access to the quarantined subreddit. Usage: .. code-block:: python subreddit = reddit.subreddit("QUESTIONABLE") next(subreddit.hot()) # Returns Submission subreddit.quaran.opt_out() next(subreddit.hot()) # Raises prawcore.Forbidden """ data = {"sr_name": self.subreddit} try: self.subreddit._reddit.post(API_PATH["quarantine_opt_out"], data=data) except Redirect: pass class SubredditRelationship: """Represents a relationship between a redditor and subreddit. Instances of this class can be iterated through in order to discover the Redditors that make up the relationship. For example, banned users of a subreddit can be iterated through like so: .. code-block:: python for ban in reddit.subreddit("redditdev").banned(): print(f"{ban}: {ban.note}") """ def __call__( self, redditor: Optional[Union[str, "praw.models.Redditor"]] = None, **generator_kwargs, ) -> Iterator["praw.models.Redditor"]: """Return a :class:`.ListingGenerator` for Redditors in the relationship. :param redditor: When provided, yield at most a single :class:`~.Redditor` instance. This is useful to confirm if a relationship exists, or to fetch the metadata associated with a particular relationship (default: None). Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. """ Subreddit._safely_add_arguments(generator_kwargs, "params", user=redditor) url = API_PATH[f"list_{self.relationship}"].format(subreddit=self.subreddit) return ListingGenerator(self.subreddit._reddit, url, **generator_kwargs) def __init__(self, subreddit: "praw.models.Subreddit", relationship: str): """Create a SubredditRelationship instance. :param subreddit: The subreddit for the relationship. :param relationship: The name of the relationship. """ self.relationship = relationship self.subreddit = subreddit def add(self, redditor: Union[str, "praw.models.Redditor"], **other_settings: Any): """Add ``redditor`` to this relationship. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. """ data = {"name": str(redditor), "type": self.relationship} data.update(other_settings) url = API_PATH["friend"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def remove(self, redditor: Union[str, "praw.models.Redditor"]): """Remove ``redditor`` from this relationship. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance.
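For example (an illustrative sketch; ``"test"`` and ``"spez"`` are placeholders, and this assumes you moderate the target subreddit), to lift a ban: .. code-block:: python reddit.subreddit("test").banned.remove("spez")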
""" data = {"name": str(redditor), "type": self.relationship} url = API_PATH["unfriend"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) class ContributorRelationship(SubredditRelationship): """Provides methods to interact with a Subreddit's contributors. Contributors are also known as approved submitters. Contributors of a subreddit can be iterated through like so: .. code-block:: python for contributor in reddit.subreddit("redditdev").contributor(): print(contributor) """ def leave(self): """Abdicate the contributor position.""" self.subreddit._reddit.post( API_PATH["leavecontributor"], data={"id": self.subreddit.fullname} ) class ModeratorRelationship(SubredditRelationship): """Provides methods to interact with a Subreddit's moderators. Moderators of a subreddit can be iterated through like so: .. code-block:: python for moderator in reddit.subreddit("redditdev").moderator(): print(moderator) """ PERMISSIONS = {"access", "config", "flair", "mail", "posts", "wiki"} @staticmethod def _handle_permissions(permissions: List[str], other_settings: dict): other_settings = deepcopy(other_settings) if other_settings else {} other_settings["permissions"] = permissions_string( permissions, ModeratorRelationship.PERMISSIONS ) return other_settings def __call__( self, redditor: Optional[Union[str, "praw.models.Redditor"]] = None ) -> List["praw.models.Redditor"]: # pylint: disable=arguments-differ """Return a list of Redditors who are moderators. :param redditor: When provided, return a list containing at most one :class:`~.Redditor` instance. This is useful to confirm if a relationship exists, or to fetch the metadata associated with a particular relationship (default: None). .. note:: Unlike other relationship callables, this relationship is not paginated. Thus it simply returns the full list, rather than an iterator for the results. To be used like: .. code-block:: python moderators = reddit.subreddit("nameofsub").moderator() For example, to list the moderators along with their permissions try: .. code-block:: python for moderator in reddit.subreddit("SUBREDDIT").moderator(): print(f"{moderator}: {moderator.mod_permissions}") """ params = {} if redditor is None else {"user": redditor} url = API_PATH[f"list_{self.relationship}"].format(subreddit=self.subreddit) return self.subreddit._reddit.get(url, params=params) # pylint: disable=arguments-differ def add( self, redditor: Union[str, "praw.models.Redditor"], permissions: Optional[List[str]] = None, **other_settings: Any, ): """Add or invite ``redditor`` to be a moderator of the subreddit. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. :param permissions: When provided (not ``None``), permissions should be a list of strings specifying which subset of permissions to grant. An empty list ``[]`` indicates no permissions, and when not provided ``None``, indicates full permissions. An invite will be sent unless the user making this call is an admin user. For example, to invite ``"spez"`` with ``"posts"`` and ``"mail"`` permissions to ``r/test``, try: .. code-block:: python reddit.subreddit("test").moderator.add("spez", ["posts", "mail"]) """ other_settings = self._handle_permissions(permissions, other_settings) super().add(redditor, **other_settings) # pylint: enable=arguments-differ def invite( self, redditor: Union[str, "praw.models.Redditor"], permissions: Optional[List[str]] = None, **other_settings: Any, ): """Invite ``redditor`` to be a moderator of the subreddit. 
:param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. :param permissions: When provided (not ``None``), permissions should be a list of strings specifying which subset of permissions to grant. An empty list ``[]`` indicates no permissions, and when not provided (``None``), indicates full permissions. For example, to invite ``"spez"`` with ``posts`` and ``mail`` permissions to ``r/test``, try: .. code-block:: python reddit.subreddit("test").moderator.invite("spez", ["posts", "mail"]) """ data = self._handle_permissions(permissions, other_settings) data.update({"name": str(redditor), "type": "moderator_invite"}) url = API_PATH["friend"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def invited( self, redditor: Optional[Union[str, "praw.models.Redditor"]] = None, **generator_kwargs: Any, ) -> Iterator["praw.models.Redditor"]: """Return a :class:`.ListingGenerator` for Redditors invited to be moderators. :param redditor: When provided, return a list containing at most one :class:`~.Redditor` instance. This is useful to confirm if a relationship exists, or to fetch the metadata associated with a particular relationship (default: None). Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. .. note:: Unlike other usages of :class:`.ListingGenerator`, ``limit`` has no effect on the quantity returned. This endpoint always returns moderators in batches of 25 at a time regardless of what ``limit`` is set to. Usage: .. code-block:: python for invited_mod in reddit.subreddit("NAME").moderator.invited(): print(invited_mod) """ generator_kwargs["params"] = {"username": redditor} if redditor else None url = API_PATH["list_invited_moderator"].format(subreddit=self.subreddit) return ListingGenerator(self.subreddit._reddit, url, **generator_kwargs) def leave(self): """Abdicate the moderator position (use with care). For example: .. code-block:: python reddit.subreddit("subredditname").moderator.leave() """ self.remove( self.subreddit._reddit.config.username or self.subreddit._reddit.user.me() ) def remove_invite(self, redditor: Union[str, "praw.models.Redditor"]): """Remove the moderator invite for ``redditor``. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. For example: .. code-block:: python reddit.subreddit("subredditname").moderator.remove_invite("spez") """ data = {"name": str(redditor), "type": "moderator_invite"} url = API_PATH["unfriend"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def update( self, redditor: Union[str, "praw.models.Redditor"], permissions: Optional[List[str]] = None, ): """Update the moderator permissions for ``redditor``. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. :param permissions: When provided (not ``None``), permissions should be a list of strings specifying which subset of permissions to grant. An empty list ``[]`` indicates no permissions, and when not provided (``None``), indicates full permissions. For example, to add all permissions to the moderator, try: .. code-block:: python subreddit.moderator.update("spez") To remove all permissions from the moderator, try: ..
code-block:: python subreddit.moderator.update("spez", []) """ url = API_PATH["setpermissions"].format(subreddit=self.subreddit) data = self._handle_permissions( permissions, {"name": str(redditor), "type": "moderator"} ) self.subreddit._reddit.post(url, data=data) def update_invite( self, redditor: Union[str, "praw.models.Redditor"], permissions: Optional[List[str]] = None, ): """Update the moderator invite permissions for ``redditor``. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. :param permissions: When provided (not ``None``), permissions should be a list of strings specifying which subset of permissions to grant. An empty list ``[]`` indicates no permissions, and when not provided (``None``), indicates full permissions. For example, to grant the ``flair`` and ``mail`` permissions to the moderator invite, try: .. code-block:: python subreddit.moderator.update_invite("spez", ["flair", "mail"]) """ url = API_PATH["setpermissions"].format(subreddit=self.subreddit) data = self._handle_permissions( permissions, {"name": str(redditor), "type": "moderator_invite"} ) self.subreddit._reddit.post(url, data=data) class Modmail: """Provides modmail functions for a subreddit. For example, to send a new modmail from the subreddit ``r/test`` to user ``u/spez`` with the subject ``test`` along with a message body of ``hello``: .. code-block:: python reddit.subreddit("test").modmail.create("test", "hello", "spez") """ def __call__( self, id: Optional[str] = None, mark_read: bool = False ): # noqa: D207, D301 """Return an individual conversation. :param id: A reddit base36 conversation ID, e.g., ``2gmz``. :param mark_read: If True, conversation is marked as read (default: False). For example: .. code-block:: python reddit.subreddit("redditdev").modmail("2gmz", mark_read=True) To print all messages from a conversation as Markdown source: .. code-block:: python conversation = reddit.subreddit("redditdev").modmail("2gmz", mark_read=True) for message in conversation.messages: print(message.body_markdown) ``ModmailConversation.user`` is a special instance of :class:`.Redditor` with extra attributes describing the non-moderator user's recent posts, comments, and modmail messages within the subreddit, as well as information on active bans and mutes. This attribute does not exist on internal moderator discussions. For example, to print the user's ban status: .. code-block:: python conversation = reddit.subreddit("redditdev").modmail("2gmz", mark_read=True) print(conversation.user.ban_status) To print a list of recent submissions by the user: .. code-block:: python conversation = reddit.subreddit("redditdev").modmail("2gmz", mark_read=True) print(conversation.user.recent_posts) """ # pylint: disable=invalid-name,redefined-builtin return ModmailConversation(self.subreddit._reddit, id=id, mark_read=mark_read) def __init__(self, subreddit: "praw.models.Subreddit"): """Construct an instance of the Modmail object.""" self.subreddit = subreddit def _build_subreddit_list( self, other_subreddits: Optional[List["praw.models.Subreddit"]] ): """Return a comma-separated list of subreddit display names.""" subreddits = [self.subreddit] + (other_subreddits or []) return ",".join(str(subreddit) for subreddit in subreddits) def bulk_read( self, other_subreddits: Optional[List[Union["praw.models.Subreddit", str]]] = None, state: Optional[str] = None, ) -> List[ModmailConversation]: """Mark conversations for subreddit(s) as read.
Due to server-side restrictions, "all" is not a valid subreddit for this method. Instead, use :meth:`~.Modmail.subreddits` to get a list of subreddits using the new modmail. :param other_subreddits: A list of :class:`.Subreddit` instances for which to mark conversations (default: None). :param state: Can be one of: all, archived, highlighted, inprogress, mod, new, notifications, or appeals (default: all). "all" does not include internal, archived, or appeals conversations. :returns: A list of :class:`.ModmailConversation` instances that were marked read. For example, to mark all notifications for a subreddit as read: .. code-block:: python subreddit = reddit.subreddit("redditdev") subreddit.modmail.bulk_read(state="notifications") """ params = {"entity": self._build_subreddit_list(other_subreddits)} if state: params["state"] = state response = self.subreddit._reddit.post( API_PATH["modmail_bulk_read"], params=params ) return [ self(conversation_id) for conversation_id in response["conversation_ids"] ] def conversations( self, after: Optional[str] = None, limit: Optional[int] = None, other_subreddits: Optional[List["praw.models.Subreddit"]] = None, sort: Optional[str] = None, state: Optional[str] = None, ) -> Generator[ModmailConversation, None, None]: # noqa: D207, D301 """Generate :class:`.ModmailConversation` objects for subreddit(s). :param after: A base36 modmail conversation id. When provided, the listing begins after this conversation (default: None). :param limit: The maximum number of conversations to fetch. If None, the server-side default is 25 at the time of writing (default: None). :param other_subreddits: A list of :class:`.Subreddit` instances for which to fetch conversations (default: None). :param sort: Can be one of: mod, recent, unread, user (default: recent). :param state: Can be one of: all, archived, highlighted, inprogress, mod, new, notifications, or appeals (default: all). "all" does not include internal, archived, or appeals conversations. For example: .. code-block:: python conversations = reddit.subreddit("all").modmail.conversations(state="mod") """ params = {} if self.subreddit != "all": params["entity"] = self._build_subreddit_list(other_subreddits) for name, value in { "after": after, "limit": limit, "sort": sort, "state": state, }.items(): if value: params[name] = value response = self.subreddit._reddit.get( API_PATH["modmail_conversations"], params=params ) for conversation_id in response["conversationIds"]: data = { "conversation": response["conversations"][conversation_id], "messages": response["messages"], } yield ModmailConversation.parse( data, self.subreddit._reddit, convert_objects=False ) def create( self, subject: str, body: str, recipient: Union[str, "praw.models.Redditor"], author_hidden: bool = False, ) -> ModmailConversation: """Create a new modmail conversation. :param subject: The message subject. Cannot be empty. :param body: The message body. Cannot be empty. :param recipient: The recipient; a username or an instance of :class:`.Redditor`. :param author_hidden: When True, author is hidden from non-moderators (default: False). :returns: A :class:`.ModmailConversation` object for the newly created conversation. ..
code-block:: python subreddit = reddit.subreddit("redditdev") redditor = reddit.redditor("bboe") subreddit.modmail.create("Subject", "Body", redditor) """ data = { "body": body, "isAuthorHidden": author_hidden, "srName": self.subreddit, "subject": subject, "to": recipient, } return self.subreddit._reddit.post(API_PATH["modmail_conversations"], data=data) def subreddits(self) -> Generator["praw.models.Subreddit", None, None]: """Yield subreddits using the new modmail that the user moderates. For example: .. code-block:: python subreddits = reddit.subreddit("all").modmail.subreddits() """ response = self.subreddit._reddit.get(API_PATH["modmail_subreddits"]) for value in response["subreddits"].values(): subreddit = self.subreddit._reddit.subreddit(value["display_name"]) subreddit.last_updated = value["lastUpdated"] yield subreddit def unread_count(self) -> Dict[str, int]: """Return unread conversation count by conversation state. At time of writing, possible states are: archived, highlighted, inprogress, mod, new, notifications, or appeals. :returns: A dict mapping conversation states to unread counts. For example, to print the count of unread moderator discussions: .. code-block:: python subreddit = reddit.subreddit("redditdev") unread_counts = subreddit.modmail.unread_count() print(unread_counts["mod"]) """ return self.subreddit._reddit.get(API_PATH["modmail_unread_count"]) class SubredditStream: """Provides submission and comment streams.""" def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditStream instance. :param subreddit: The subreddit associated with the streams. """ self.subreddit = subreddit def comments( self, **stream_options: Any ) -> Generator["praw.models.Comment", None, None]: """Yield new comments as they become available. Comments are yielded oldest first. Up to 100 historical comments will initially be returned. Keyword arguments are passed to :func:`.stream_generator`. .. note:: While PRAW tries to catch all new comments, some high-volume streams, especially the r/all stream, may drop some comments. For example, to retrieve all new comments made to the ``iama`` subreddit, try: .. code-block:: python for comment in reddit.subreddit("iama").stream.comments(): print(comment) To only retrieve new comments starting when the stream is created, pass ``skip_existing=True``: .. code-block:: python subreddit = reddit.subreddit("iama") for comment in subreddit.stream.comments(skip_existing=True): print(comment) """ return stream_generator(self.subreddit.comments, **stream_options) def submissions( self, **stream_options: Any ) -> Generator["praw.models.Submission", None, None]: """Yield new submissions as they become available. Submissions are yielded oldest first. Up to 100 historical submissions will initially be returned. Keyword arguments are passed to :func:`.stream_generator`. .. note:: While PRAW tries to catch all new submissions, some high-volume streams, especially the r/all stream, may drop some submissions. For example, to retrieve all new submissions made to all of Reddit, try: .. code-block:: python for submission in reddit.subreddit("all").stream.submissions(): print(submission) """ return stream_generator(self.subreddit.new, **stream_options) class SubredditStylesheet: """Provides a set of stylesheet functions to a Subreddit. For example, to add the css data ``.test{color:blue}`` to the existing stylesheet: ..
code-block:: python subreddit = reddit.subreddit("SUBREDDIT") stylesheet = subreddit.stylesheet() stylesheet.stylesheet += ".test{color:blue}" subreddit.stylesheet.update(stylesheet.stylesheet) """ def __call__(self) -> "praw.models.Stylesheet": """Return the subreddit's stylesheet. To be used as: .. code-block:: python stylesheet = reddit.subreddit("SUBREDDIT").stylesheet() """ url = API_PATH["about_stylesheet"].format(subreddit=self.subreddit) return self.subreddit._reddit.get(url) def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditStylesheet instance. :param subreddit: The subreddit associated with the stylesheet. An instance of this class is provided as: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet """ self.subreddit = subreddit def _update_structured_styles(self, style_data: Dict[str, Union[str, Any]]): url = API_PATH["structured_styles"].format(subreddit=self.subreddit) self.subreddit._reddit.patch(url, style_data) def _upload_image( self, image_path: str, data: Dict[str, Union[str, Any]] ) -> Dict[str, Any]: with open(image_path, "rb") as image: header = image.read(len(JPEG_HEADER)) image.seek(0) data["img_type"] = "jpg" if header == JPEG_HEADER else "png" url = API_PATH["upload_image"].format(subreddit=self.subreddit) response = self.subreddit._reddit.post( url, data=data, files={"file": image} ) if response["errors"]: error_type = response["errors"][0] error_value = response.get("errors_values", [""])[0] assert error_type in [ "BAD_CSS_NAME", "IMAGE_ERROR", ], "Please file a bug with PRAW." raise RedditAPIException([[error_type, error_value, None]]) return response def _upload_style_asset(self, image_path: str, image_type: str) -> str: data = {"imagetype": image_type, "filepath": basename(image_path)} data["mimetype"] = "image/jpeg" if image_path.lower().endswith(".png"): data["mimetype"] = "image/png" url = API_PATH["style_asset_lease"].format(subreddit=self.subreddit) upload_lease = self.subreddit._reddit.post(url, data=data)["s3UploadLease"] upload_data = {item["name"]: item["value"] for item in upload_lease["fields"]} upload_url = f"https:{upload_lease['action']}" with open(image_path, "rb") as image: response = self.subreddit._reddit._core._requestor._http.post( upload_url, data=upload_data, files={"file": image} ) response.raise_for_status() return f"{upload_url}/{upload_data['key']}" def delete_banner(self): """Remove the current subreddit (redesign) banner image. Succeeds even if there is no banner image. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_banner() """ data = {"bannerBackgroundImage": ""} self._update_structured_styles(data) def delete_banner_additional_image(self): """Remove the current subreddit (redesign) banner additional image. Succeeds even if there is no additional image. Will also delete any configured hover image. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_banner_additional_image() """ data = {"bannerPositionedImage": "", "secondaryBannerPositionedImage": ""} self._update_structured_styles(data) def delete_banner_hover_image(self): """Remove the current subreddit (redesign) banner hover image. Succeeds even if there is no hover image. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_banner_hover_image() """ data = {"secondaryBannerPositionedImage": ""} self._update_structured_styles(data) def delete_header(self): """Remove the current subreddit header image.
Succeeds even if there is no header image. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_header() """ url = API_PATH["delete_sr_header"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url) def delete_image(self, name: str): """Remove the named image from the subreddit. Succeeds even if the named image does not exist. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_image("smile") """ url = API_PATH["delete_sr_image"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data={"img_name": name}) def delete_mobile_header(self): """Remove the current subreddit mobile header. Succeeds even if there is no mobile header. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_mobile_header() """ url = API_PATH["delete_sr_header"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url) def delete_mobile_icon(self): """Remove the current subreddit mobile icon. Succeeds even if there is no mobile icon. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_mobile_icon() """ url = API_PATH["delete_sr_icon"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url) def update(self, stylesheet: str, reason: Optional[str] = None): """Update the subreddit's stylesheet. :param stylesheet: The CSS for the new stylesheet. :param reason: The reason for updating the stylesheet. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.update( "p { color: green; }", "color text green" ) """ data = {"op": "save", "reason": reason, "stylesheet_contents": stylesheet} url = API_PATH["subreddit_stylesheet"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def upload(self, name: str, image_path: str) -> Dict[str, str]: """Upload an image to the Subreddit. :param name: The name to use for the image. If an image already exists with the same name, it will be replaced. :param image_path: A path to a jpeg or png image. :returns: A dictionary containing a link to the uploaded image under the key ``img_src``. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.upload("smile", "img.png") """ return self._upload_image(image_path, {"name": name, "upload_type": "img"}) def upload_banner(self, image_path: str): """Upload an image for the subreddit's (redesign) banner image. :param image_path: A path to a jpeg or png image. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.upload_banner("banner.png") """ image_type = "bannerBackgroundImage" image_url = self._upload_style_asset(image_path, image_type) self._update_structured_styles({image_type: image_url}) def upload_banner_additional_image( self, image_path: str, align: Optional[str] = None ): """Upload an image for the subreddit's (redesign) additional image. 
:param image_path: A path to a jpeg or png image. :param align: Either ``left``, ``centered``, or ``right`` (default: ``left``). :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python subreddit = reddit.subreddit("SUBREDDIT") subreddit.stylesheet.upload_banner_additional_image("banner.png") """ alignment = {} if align is not None: if align not in {"left", "centered", "right"}: raise ValueError( "align argument must be either `left`, `centered`, or `right`" ) alignment["bannerPositionedImagePosition"] = align image_type = "bannerPositionedImage" image_url = self._upload_style_asset(image_path, image_type) style_data = {image_type: image_url} if alignment: style_data.update(alignment) self._update_structured_styles(style_data) def upload_banner_hover_image(self, image_path: str): """Upload an image for the subreddit's (redesign) banner hover image. :param image_path: A path to a jpeg or png image. Fails if the Subreddit does not have an additional image defined. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python subreddit = reddit.subreddit("SUBREDDIT") subreddit.stylesheet.upload_banner_hover_image("banner.png") """ image_type = "secondaryBannerPositionedImage" image_url = self._upload_style_asset(image_path, image_type) self._update_structured_styles({image_type: image_url}) def upload_header(self, image_path: str) -> Dict[str, str]: """Upload an image to be used as the Subreddit's header image. :param image_path: A path to a jpeg or png image. :returns: A dictionary containing a link to the uploaded image under the key ``img_src``. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.upload_header("header.png") """ return self._upload_image(image_path, {"upload_type": "header"}) def upload_mobile_header(self, image_path: str) -> Dict[str, str]: """Upload an image to be used as the Subreddit's mobile header. :param image_path: A path to a jpeg or png image. :returns: A dictionary containing a link to the uploaded image under the key ``img_src``. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.upload_mobile_header("header.png") """ return self._upload_image(image_path, {"upload_type": "banner"}) def upload_mobile_icon(self, image_path: str) -> Dict[str, str]: """Upload an image to be used as the Subreddit's mobile icon.
:param image_path: A path to a jpeg or png image. :returns: A dictionary containing a link to the uploaded image under the key ``img_src``. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.upload_mobile_icon("icon.png") """ return self._upload_image(image_path, {"upload_type": "icon"}) class SubredditWiki: """Provides a set of wiki functions to a Subreddit.""" def __getitem__(self, page_name: str) -> WikiPage: """Lazily return the WikiPage for the subreddit named ``page_name``. This method is to be used to fetch a specific wikipage, like so: .. code-block:: python wikipage = reddit.subreddit("iama").wiki["proof"] print(wikipage.content_md) """ return WikiPage(self.subreddit._reddit, self.subreddit, page_name.lower()) def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditWiki instance. :param subreddit: The subreddit whose wiki to work with. """ self.banned = SubredditRelationship(subreddit, "wikibanned") self.contributor = SubredditRelationship(subreddit, "wikicontributor") self.subreddit = subreddit def __iter__(self) -> Generator[WikiPage, None, None]: """Iterate through the pages of the wiki. This method is to be used to discover all wikipages for a subreddit: .. code-block:: python for wikipage in reddit.subreddit("iama").wiki: print(wikipage) """ response = self.subreddit._reddit.get( API_PATH["wiki_pages"].format(subreddit=self.subreddit), params={"unique": self.subreddit._reddit._next_unique}, ) for page_name in response["data"]: yield WikiPage(self.subreddit._reddit, self.subreddit, page_name) def create( self, name: str, content: str, reason: Optional[str] = None, **other_settings: Any, ): """Create a new wiki page. :param name: The name of the new WikiPage. This name will be normalized. :param content: The content of the new WikiPage. :param reason: (Optional) The reason for the creation. :param other_settings: Additional keyword arguments to pass. To create the wiki page ``praw_test`` in ``r/test`` try: .. code-block:: python reddit.subreddit("test").wiki.create( "praw_test", "wiki body text", reason="PRAW Test Creation" ) """ name = name.replace(" ", "_").lower() new = WikiPage(self.subreddit._reddit, self.subreddit, name) new.edit(content=content, reason=reason, **other_settings) return new def revisions( self, **generator_kwargs: Any ) -> Generator[ Dict[str, Optional[Union["praw.models.Redditor", WikiPage, str, int, bool]]], None, None, ]: """Return a :class:`.ListingGenerator` for recent wiki revisions. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To view the recent wiki revisions for ``r/test`` try: .. code-block:: python for item in reddit.subreddit("test").wiki.revisions(): print(item) """ url = API_PATH["wiki_revisions"].format(subreddit=self.subreddit) return WikiPage._revision_generator(self.subreddit, url, generator_kwargs)
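# ---------------------------------------------------------------------------
# Usage sketch (not part of PRAW itself): a minimal, hedged example tying the
# SubredditWiki helpers above together. The subreddit name "test", the page
# name "praw test", and the praw.ini site name "DEMO" are placeholder
# assumptions; the calls themselves (wiki.create, iteration over the wiki,
# and wiki.revisions) are the API documented above.
# ---------------------------------------------------------------------------
import praw


def demo_subreddit_wiki(reddit: praw.Reddit) -> None:
    """Create a wiki page, then list all pages and recent revisions."""
    wiki = reddit.subreddit("test").wiki  # "test" is a placeholder subreddit
    # create() normalizes the name: spaces become underscores, lowercased.
    page = wiki.create("praw test", "initial body", reason="demo creation")
    print(f"created: {page}")
    for wikipage in wiki:  # __iter__ yields every WikiPage in the subreddit
        print(wikipage)
    for revision in wiki.revisions(limit=5):  # subreddit-wide revision dicts
        print(revision)


if __name__ == "__main__":
    # Assumes a "DEMO" site in praw.ini with valid credentials (placeholder).
    demo_subreddit_wiki(praw.Reddit("DEMO"))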
"""Provide the Subreddit class.""" # pylint: disable=too-many-lines import socket from copy import deepcopy from csv import writer from io import StringIO from json import dumps, loads from os.path import basename, dirname, isfile, join from typing import TYPE_CHECKING, Any, Dict, Generator, Iterator, List, Optional, Union from urllib.parse import urljoin from xml.etree.ElementTree import XML import websocket from prawcore import Redirect from requests import Response from ...const import API_PATH, JPEG_HEADER from ...exceptions import ( ClientException, InvalidFlairTemplateID, MediaPostFailed, RedditAPIException, TooLargeMediaException, WebSocketException, ) from ...util.cache import cachedproperty from ..listing.generator import ListingGenerator from ..listing.mixins import SubredditListingMixin from ..util import permissions_string, stream_generator from .base import RedditBase from .emoji import SubredditEmoji from .mixins import FullnameMixin, MessageableMixin from .modmail import ModmailConversation from .removal_reasons import SubredditRemovalReasons from .rules import SubredditRules from .widgets import SubredditWidgets, WidgetEncoder from .wikipage import WikiPage if TYPE_CHECKING: # pragma: no cover from .... import praw class Subreddit(MessageableMixin, SubredditListingMixin, FullnameMixin, RedditBase): """A class for Subreddits. To obtain an instance of this class for subreddit ``r/redditdev`` execute: .. code-block:: python subreddit = reddit.subreddit("redditdev") While ``r/all`` is not a real subreddit, it can still be treated like one. The following outputs the titles of the 25 hottest submissions in ``r/all``: .. code-block:: python for submission in reddit.subreddit("all").hot(limit=25): print(submission.title) Multiple subreddits can be combined with a ``+`` like so: .. code-block:: python for submission in reddit.subreddit("redditdev+learnpython").top("all"): print(submission) Subreddits can be filtered from combined listings as follows. .. note:: These filters are ignored by certain methods, including :attr:`.comments`, :meth:`.gilded`, and :meth:`.SubredditStream.comments`. .. code-block:: python for submission in reddit.subreddit("all-redditdev").new(): print(submission) **Typical Attributes** This table describes attributes that typically belong to objects of this class. Since attributes are dynamically provided (see :ref:`determine-available-attributes-of-an-object`), there is not a guarantee that these attributes will always be present, nor is this list necessarily complete. ========================= ========================================================== Attribute Description ========================= ========================================================== ``can_assign_link_flair`` Whether users can assign their own link flair. ``can_assign_user_flair`` Whether users can assign their own user flair. ``created_utc`` Time the subreddit was created, represented in `Unix Time`_. ``description`` Subreddit description, in Markdown. ``description_html`` Subreddit description, in HTML. ``display_name`` Name of the subreddit. ``id`` ID of the subreddit. ``name`` Fullname of the subreddit. ``over18`` Whether the subreddit is NSFW. ``public_description`` Description of the subreddit, shown in searches and on the "You must be invited to visit this community" page (if applicable). ``spoilers_enabled`` Whether the spoiler tag feature is enabled. ``subscribers`` Count of subscribers. ``user_is_banned`` Whether the authenticated user is banned. 
``user_is_moderator`` Whether the authenticated user is a moderator. ``user_is_subscriber`` Whether the authenticated user is subscribed. ========================= ========================================================== .. note:: Trying to retrieve attributes of quarantined or private subreddits will result in a 403 error. Trying to retrieve attributes of a banned subreddit will result in a 404 error. .. _unix time: https://en.wikipedia.org/wiki/Unix_time """ # pylint: disable=too-many-public-methods STR_FIELD = "display_name" MESSAGE_PREFIX = "#" @staticmethod def _create_or_update( _reddit, allow_images=None, allow_post_crossposts=None, allow_top=None, collapse_deleted_comments=None, comment_score_hide_mins=None, description=None, domain=None, exclude_banned_modqueue=None, header_hover_text=None, hide_ads=None, lang=None, key_color=None, link_type=None, name=None, over_18=None, public_description=None, public_traffic=None, show_media=None, show_media_preview=None, spam_comments=None, spam_links=None, spam_selfposts=None, spoilers_enabled=None, sr=None, submit_link_label=None, submit_text=None, submit_text_label=None, subreddit_type=None, suggested_comment_sort=None, title=None, wiki_edit_age=None, wiki_edit_karma=None, wikimode=None, **other_settings, ): # pylint: disable=invalid-name,too-many-locals,too-many-arguments model = { "allow_images": allow_images, "allow_post_crossposts": allow_post_crossposts, "allow_top": allow_top, "collapse_deleted_comments": collapse_deleted_comments, "comment_score_hide_mins": comment_score_hide_mins, "description": description, "domain": domain, "exclude_banned_modqueue": exclude_banned_modqueue, "header-title": header_hover_text, # Remap here - better name "hide_ads": hide_ads, "key_color": key_color, "lang": lang, "link_type": link_type, "name": name, "over_18": over_18, "public_description": public_description, "public_traffic": public_traffic, "show_media": show_media, "show_media_preview": show_media_preview, "spam_comments": spam_comments, "spam_links": spam_links, "spam_selfposts": spam_selfposts, "spoilers_enabled": spoilers_enabled, "sr": sr, "submit_link_label": submit_link_label, "submit_text": submit_text, "submit_text_label": submit_text_label, "suggested_comment_sort": suggested_comment_sort, "title": title, "type": subreddit_type, "wiki_edit_age": wiki_edit_age, "wiki_edit_karma": wiki_edit_karma, "wikimode": wikimode, } model.update(other_settings) _reddit.post(API_PATH["site_admin"], data=model) @staticmethod def _subreddit_list(subreddit, other_subreddits): if other_subreddits: return ",".join([str(subreddit)] + [str(x) for x in other_subreddits]) return str(subreddit) @staticmethod def _validate_gallery(images): for image in images: image_path = image.get("image_path", "") if image_path: if not isfile(image_path): raise TypeError(f"{image_path!r} is not a valid image path.") else: raise TypeError("'image_path' is required.") if not len(image.get("caption", "")) <= 180: raise TypeError("Caption must be 180 characters or less.") @staticmethod def _validate_inline_media(inline_media: "praw.models.InlineMedia"): if not isfile(inline_media.path): raise ValueError(f"{inline_media.path!r} is not a valid file path.") @property def _kind(self) -> str: """Return the class's kind.""" return self._reddit.config.kinds["subreddit"] @cachedproperty def banned(self) -> "praw.models.reddit.subreddit.SubredditRelationship": """Provide an instance of :class:`.SubredditRelationship`. For example, to ban a user try: .. 
code-block:: python reddit.subreddit("SUBREDDIT").banned.add("NAME", ban_reason="...") To list the banned users along with any notes, try: .. code-block:: python for ban in reddit.subreddit("SUBREDDIT").banned(): print(f"{ban}: {ban.note}") """ return SubredditRelationship(self, "banned") @cachedproperty def collections(self) -> "praw.models.reddit.collections.SubredditCollections": r"""Provide an instance of :class:`.SubredditCollections`. To see the permalinks of all :class:`.Collection`\ s that belong to a subreddit, try: .. code-block:: python for collection in reddit.subreddit("SUBREDDIT").collections: print(collection.permalink) To get a specific :class:`.Collection` by its UUID or permalink, use one of the following: .. code-block:: python collection = reddit.subreddit("SUBREDDIT").collections("some_uuid") collection = reddit.subreddit("SUBREDDIT").collections( permalink="https://reddit.com/r/SUBREDDIT/collection/some_uuid" ) """ return self._subreddit_collections_class(self._reddit, self) @cachedproperty def contributor(self) -> "praw.models.reddit.subreddit.ContributorRelationship": """Provide an instance of :class:`.ContributorRelationship`. Contributors are also known as approved submitters. To add a contributor try: .. code-block:: python reddit.subreddit("SUBREDDIT").contributor.add("NAME") """ return ContributorRelationship(self, "contributor") @cachedproperty def emoji(self) -> SubredditEmoji: """Provide an instance of :class:`.SubredditEmoji`. This attribute can be used to discover all emoji for a subreddit: .. code-block:: python for emoji in reddit.subreddit("iama").emoji: print(emoji) A single emoji can be lazily retrieved via: .. code-block:: python reddit.subreddit("blah").emoji["emoji_name"] .. note:: Attempting to access attributes of a nonexistent emoji will result in a :class:`.ClientException`. """ return SubredditEmoji(self) @cachedproperty def filters(self) -> "praw.models.reddit.subreddit.SubredditFilters": """Provide an instance of :class:`.SubredditFilters`. For example, to add a filter, run: .. code-block:: python reddit.subreddit("all").filters.add("subreddit_name") """ return SubredditFilters(self) @cachedproperty def flair(self) -> "praw.models.reddit.subreddit.SubredditFlair": """Provide an instance of :class:`.SubredditFlair`. Use this attribute for interacting with a subreddit's flair. For example, to list all the flair for a subreddit which you have the ``flair`` moderator permission on try: .. code-block:: python for flair in reddit.subreddit("NAME").flair(): print(flair) Flair templates can be interacted with through this attribute via: .. code-block:: python for template in reddit.subreddit("NAME").flair.templates: print(template) """ return SubredditFlair(self) @cachedproperty def mod(self) -> "praw.models.reddit.subreddit.SubredditModeration": """Provide an instance of :class:`.SubredditModeration`. For example, to accept a moderation invite from subreddit ``r/test``: .. code-block:: python reddit.subreddit("test").mod.accept_invite() """ return SubredditModeration(self) @cachedproperty def moderator(self) -> "praw.models.reddit.subreddit.ModeratorRelationship": """Provide an instance of :class:`.ModeratorRelationship`. For example, to add a moderator try: .. code-block:: python reddit.subreddit("SUBREDDIT").moderator.add("NAME") To list the moderators along with their permissions try: ..
code-block:: python for moderator in reddit.subreddit("SUBREDDIT").moderator(): print(f"{moderator}: {moderator.mod_permissions}") """ return ModeratorRelationship(self, "moderator") @cachedproperty def modmail(self) -> "praw.models.reddit.subreddit.Modmail": """Provide an instance of :class:`.Modmail`. For example, to send a new modmail from the subreddit ``r/test`` to user ``u/spez`` with the subject ``test`` along with a message body of ``hello``: .. code-block:: python reddit.subreddit("test").modmail.create("test", "hello", "spez") """ return Modmail(self) @cachedproperty def muted(self) -> "praw.models.reddit.subreddit.SubredditRelationship": """Provide an instance of :class:`.SubredditRelationship`. For example, muted users can be iterated through like so: .. code-block:: python for mute in reddit.subreddit("redditdev").muted(): print(f"{mute}: {mute.note}") """ return SubredditRelationship(self, "muted") @cachedproperty def quaran(self) -> "praw.models.reddit.subreddit.SubredditQuarantine": """Provide an instance of :class:`.SubredditQuarantine`. This property is named ``quaran`` because ``quarantine`` is a Subreddit attribute returned by Reddit to indicate whether or not a Subreddit is quarantined. To opt in to a quarantined subreddit: .. code-block:: python reddit.subreddit("test").quaran.opt_in() """ return SubredditQuarantine(self) @cachedproperty def rules(self) -> SubredditRules: """Provide an instance of :class:`.SubredditRules`. Use this attribute for interacting with a subreddit's rules. For example, to list all the rules for a subreddit: .. code-block:: python for rule in reddit.subreddit("AskReddit").rules: print(rule) Moderators can also add rules to the subreddit. For example, to make a rule called ``"No spam"`` in the subreddit ``"NAME"``: .. code-block:: python reddit.subreddit("NAME").rules.mod.add( short_name="No spam", kind="all", description="Do not spam. Spam bad" ) """ return SubredditRules(self) @cachedproperty def stream(self) -> "praw.models.reddit.subreddit.SubredditStream": """Provide an instance of :class:`.SubredditStream`. Streams can be used to indefinitely retrieve new comments made to a subreddit, like: .. code-block:: python for comment in reddit.subreddit("iama").stream.comments(): print(comment) Additionally, new submissions can be retrieved via the stream. In the following example all submissions are fetched via the special subreddit ``r/all``: .. code-block:: python for submission in reddit.subreddit("all").stream.submissions(): print(submission) """ return SubredditStream(self) @cachedproperty def stylesheet(self) -> "praw.models.reddit.subreddit.SubredditStylesheet": """Provide an instance of :class:`.SubredditStylesheet`. For example, to add the css data ``.test{color:blue}`` to the existing stylesheet: .. code-block:: python subreddit = reddit.subreddit("SUBREDDIT") stylesheet = subreddit.stylesheet() stylesheet.stylesheet += ".test{color:blue}" subreddit.stylesheet.update(stylesheet.stylesheet) """ return SubredditStylesheet(self) @cachedproperty def widgets(self) -> "praw.models.SubredditWidgets": """Provide an instance of :class:`.SubredditWidgets`. **Example usage** Get all sidebar widgets: .. code-block:: python for widget in reddit.subreddit("redditdev").widgets.sidebar: print(widget) Get ID card widget: .. 
code-block:: python print(reddit.subreddit("redditdev").widgets.id_card) """ return SubredditWidgets(self) @cachedproperty def wiki(self) -> "praw.models.reddit.subreddit.SubredditWiki": """Provide an instance of :class:`.SubredditWiki`. This attribute can be used to discover all wikipages for a subreddit: .. code-block:: python for wikipage in reddit.subreddit("iama").wiki: print(wikipage) To fetch the content for a given wikipage try: .. code-block:: python wikipage = reddit.subreddit("iama").wiki["proof"] print(wikipage.content_md) """ return SubredditWiki(self) def __init__( self, reddit: "praw.Reddit", display_name: Optional[str] = None, _data: Optional[Dict[str, Any]] = None, ): """Initialize a Subreddit instance. :param reddit: An instance of :class:`~.Reddit`. :param display_name: The name of the subreddit. .. note:: This class should not be initialized directly. Instead obtain an instance via: ``reddit.subreddit("subreddit_name")`` """ if (display_name, _data).count(None) != 1: raise TypeError("Either `display_name` or `_data` must be provided.") if display_name: self.display_name = display_name super().__init__(reddit, _data=_data) self._path = API_PATH["subreddit"].format(subreddit=self) def _convert_to_fancypants(self, markdown_text: str) -> dict: """Convert a Markdown string to a dict for use with the ``richtext_json`` param. :param markdown_text: A Markdown string to convert. :returns: A dict in ``richtext_json`` format. """ text_data = {"output_mode": "rtjson", "markdown_text": markdown_text} return self._reddit.post(API_PATH["convert_rte_body"], text_data)["output"] def _fetch_info(self): return "subreddit_about", {"subreddit": self}, None def _fetch_data(self) -> dict: name, fields, params = self._fetch_info() path = API_PATH[name].format(**fields) return self._reddit.request("GET", path, params) def _fetch(self): data = self._fetch_data() data = data["data"] other = type(self)(self._reddit, _data=data) self.__dict__.update(other.__dict__) self._fetched = True def _parse_xml_response(self, response: Response): """Parse the XML from a response and raise any errors found.""" xml = response.text root = XML(xml) tags = [element.tag for element in root] if tags[:4] == ["Code", "Message", "ProposedSize", "MaxSizeAllowed"]: # Returned if image is too big code, message, actual, maximum_size = [element.text for element in root[:4]] raise TooLargeMediaException(int(maximum_size), int(actual)) def _submit_media(self, data: dict, timeout: int, websocket_url: str = None): """Submit and return an `image`, `video`, or `videogif`. This is a helper method for submitting posts that are not link posts or self posts. """ connection = None if websocket_url is not None: try: connection = websocket.create_connection(websocket_url, timeout=timeout) except ( websocket.WebSocketException, socket.error, BlockingIOError, ) as ws_exception: raise WebSocketException( "Error establishing websocket connection.", ws_exception ) self._reddit.post(API_PATH["submit"], data=data) if connection is None: return try: ws_update = loads(connection.recv()) connection.close() except ( websocket.WebSocketException, socket.error, BlockingIOError, ) as ws_exception: raise WebSocketException( "Websocket error. Check your media file. 
Your post may still have been" " created.", ws_exception, ) if ws_update.get("type") == "failed": raise MediaPostFailed url = ws_update["payload"]["redirect"] return self._reddit.submission(url=url) def _upload_media( self, media_path: str, expected_mime_prefix: Optional[str] = None, upload_type: str = "link", ): """Upload media and return its URL and a websocket (Undocumented endpoint). :param expected_mime_prefix: If provided, enforce that the media has a mime type that starts with the provided prefix. :param upload_type: One of ``link``, ``gallery``, or ``selfpost`` (default: ``link``). :returns: A tuple containing ``(media_url, websocket_url)`` for the piece of media. The websocket URL can be used to determine when media processing is finished, or it can be ignored. """ if media_path is None: media_path = join( dirname(dirname(dirname(__file__))), "images", "PRAW logo.png" ) file_name = basename(media_path).lower() file_extension = file_name.rpartition(".")[2] mime_type = { "png": "image/png", "mov": "video/quicktime", "mp4": "video/mp4", "jpg": "image/jpeg", "jpeg": "image/jpeg", "gif": "image/gif", }.get( file_extension, "image/jpeg" ) # default to JPEG if ( expected_mime_prefix is not None and mime_type.partition("/")[0] != expected_mime_prefix ): raise ClientException( f"Expected a mimetype starting with {expected_mime_prefix!r} but got" f" mimetype {mime_type!r} (from file extension {file_extension!r})." ) img_data = {"filepath": file_name, "mimetype": mime_type} url = API_PATH["media_asset"] # until we learn otherwise, assume this request always succeeds upload_response = self._reddit.post(url, data=img_data) upload_lease = upload_response["args"] upload_url = f"https:{upload_lease['action']}" upload_data = {item["name"]: item["value"] for item in upload_lease["fields"]} with open(media_path, "rb") as media: response = self._reddit._core._requestor._http.post( upload_url, data=upload_data, files={"file": media} ) if not response.ok: self._parse_xml_response(response) response.raise_for_status() websocket_url = upload_response["asset"]["websocket_url"] if upload_type == "link": return f"{upload_url}/{upload_data['key']}", websocket_url else: return upload_response["asset"]["asset_id"], websocket_url def _upload_inline_media(self, inline_media: "praw.models.InlineMedia"): """Upload media for use in self posts and return ``inline_media``. :param inline_media: An :class:`.InlineMedia` object to validate and upload. """ self._validate_inline_media(inline_media) inline_media.media_id = self._upload_media( inline_media.path, upload_type="selfpost" )[0] return inline_media def post_requirements(self) -> Dict[str, Union[str, int, bool]]: """Get the post requirements for a subreddit. :returns: A dict with the various requirements. The returned dict contains the following keys: - ``domain_blacklist`` - ``body_restriction_policy`` - ``domain_whitelist`` - ``title_regexes`` - ``body_blacklisted_strings`` - ``body_required_strings`` - ``title_text_min_length`` - ``is_flair_required`` - ``title_text_max_length`` - ``body_regexes`` - ``link_repost_age`` - ``body_text_min_length`` - ``link_restriction_policy`` - ``body_text_max_length`` - ``title_required_strings`` - ``title_blacklisted_strings`` - ``guidelines_text`` - ``guidelines_display_policy`` For example, to fetch the post requirements for ``r/test``: .. 
code-block:: python print(reddit.subreddit("test").post_requirements()) """ return self._reddit.get( API_PATH["post_requirements"].format(subreddit=str(self)) ) def random(self) -> Union["praw.models.Submission", None]: """Return a random Submission. Returns ``None`` on subreddits that do not support the random feature. One example, at the time of writing, is ``r/wallpapers``. For example, to get a random submission off of ``r/AskReddit``: .. code-block:: python submission = reddit.subreddit("AskReddit").random() print(submission.title) """ url = API_PATH["subreddit_random"].format(subreddit=self) try: self._reddit.get(url, params={"unique": self._reddit._next_unique}) except Redirect as redirect: path = redirect.path try: return self._submission_class( self._reddit, url=urljoin(self._reddit.config.reddit_url, path) ) except ClientException: return None def search( self, query: str, sort: str = "relevance", syntax: str = "lucene", time_filter: str = "all", **generator_kwargs: Any, ) -> Iterator["praw.models.Submission"]: """Return a :class:`.ListingGenerator` for items that match ``query``. :param query: The query string to search for. :param sort: Can be one of: relevance, hot, top, new, comments (default: relevance). :param syntax: Can be one of: cloudsearch, lucene, plain (default: lucene). :param time_filter: Can be one of: all, day, hour, month, week, year (default: all). For more information on building a search query see: https://www.reddit.com/wiki/search For example, to search all subreddits for ``praw`` try: .. code-block:: python for submission in reddit.subreddit("all").search("praw"): print(submission.title) """ self._validate_time_filter(time_filter) not_all = self.display_name.lower() != "all" self._safely_add_arguments( generator_kwargs, "params", q=query, restrict_sr=not_all, sort=sort, syntax=syntax, t=time_filter, ) url = API_PATH["search"].format(subreddit=self) return ListingGenerator(self._reddit, url, **generator_kwargs) def sticky(self, number: int = 1) -> "praw.models.Submission": """Return a Submission object for a sticky of the subreddit. :param number: Specify which sticky to return. 1 appears at the top (default: 1). :raises: ``prawcore.NotFound`` if the sticky does not exist. For example, to get the stickied post on the subreddit ``r/test``: .. code-block:: python reddit.subreddit("test").sticky() """ url = API_PATH["about_sticky"].format(subreddit=self) try: self._reddit.get(url, params={"num": number}) except Redirect as redirect: path = redirect.path return self._submission_class( self._reddit, url=urljoin(self._reddit.config.reddit_url, path) ) def submit( self, title: str, selftext: Optional[str] = None, url: Optional[str] = None, flair_id: Optional[str] = None, flair_text: Optional[str] = None, resubmit: bool = True, send_replies: bool = True, nsfw: bool = False, spoiler: bool = False, collection_id: Optional[str] = None, discussion_type: Optional[str] = None, inline_media: Optional[Dict[str, "praw.models.InlineMedia"]] = None, ) -> "praw.models.Submission": # noqa: D301 r"""Add a submission to the subreddit. :param title: The title of the submission. :param selftext: The Markdown formatted content for a ``text`` submission. Use an empty string, ``""``, to make a title-only submission. :param url: The URL for a ``link`` submission. :param collection_id: The UUID of a :class:`.Collection` to add the newly-submitted post to. :param flair_id: The flair template to select (default: None). 
:param flair_text: If the template's ``flair_text_editable`` value is True, this value will set a custom text (default: None). ``flair_id`` is required when ``flair_text`` is provided. :param resubmit: When False, an error will occur if the URL has already been submitted (default: True). :param send_replies: When True, messages will be sent to the submission author when comments are made to the submission (default: True). :param nsfw: Whether or not the submission should be marked NSFW (default: False). :param spoiler: Whether or not the submission should be marked as a spoiler (default: False). :param discussion_type: Set to ``CHAT`` to enable live discussion instead of traditional comments (default: None). :param inline_media: A dict of :class:`.InlineMedia` objects where the key is the placeholder name in ``selftext``. :returns: A :class:`~.Submission` object for the newly created submission. Either ``selftext`` or ``url`` can be provided, but not both. For example, to submit a URL to ``r/reddit_api_test`` do: .. code-block:: python title = "PRAW documentation" url = "https://praw.readthedocs.io" reddit.subreddit("reddit_api_test").submit(title, url=url) For example, to submit a self post with inline media do: .. code-block:: python from praw.models import InlineGif, InlineImage, InlineVideo gif = InlineGif("path/to/image.gif", "optional caption") image = InlineImage("path/to/image.jpg", "optional caption") video = InlineVideo("path/to/video.mp4", "optional caption") selftext = "Text with a gif {gif1} an image {image1} and a video {video1} inline" media = {"gif1": gif, "image1": image, "video1": video} reddit.subreddit("redditdev").submit("title", selftext=selftext, inline_media=media) .. note:: Inserted media will have a padding of ``\\n\\n`` automatically added. This is due to a quirk of Reddit's API. Using the example above, the resulting selftext body will look like this: .. code-block:: Text with a gif ![gif](u1rchuphryq51 "optional caption") an image ![img](srnr8tshryq51 "optional caption") and video ![video](gmc7rvthryq51 "optional caption") inline .. seealso:: - :meth:`.submit_image` to submit images - :meth:`.submit_video` to submit videos and videogifs - :meth:`.submit_poll` to submit polls - :meth:`.submit_gallery` 
to submit more than one image in the same post """ if (bool(selftext) or selftext == "") == bool(url): raise TypeError("Either `selftext` or `url` must be provided.") data = { "sr": str(self), "resubmit": bool(resubmit), "sendreplies": bool(send_replies), "title": title, "nsfw": bool(nsfw), "spoiler": bool(spoiler), "validate_on_submit": self._reddit.validate_on_submit, } for key, value in ( ("flair_id", flair_id), ("flair_text", flair_text), ("collection_id", collection_id), ("discussion_type", discussion_type), ): if value is not None: data[key] = value if selftext is not None: data.update(kind="self") if inline_media: body = selftext.format( **{ placeholder: self._upload_inline_media(media) for placeholder, media in inline_media.items() } ) converted = self._convert_to_fancypants(body) data.update(richtext_json=dumps(converted)) else: data.update(text=selftext) else: data.update(kind="link", url=url) return self._reddit.post(API_PATH["submit"], data=data) def submit_gallery( self, title: str, images: List[Dict[str, str]], *, collection_id: Optional[str] = None, discussion_type: Optional[str] = None, flair_id: Optional[str] = None, flair_text: Optional[str] = None, nsfw: bool = False, send_replies: bool = True, spoiler: bool = False, ): """Add an image gallery submission to the subreddit. :param title: The title of the submission. :param images: The images to post, each given as a dict with the following structure: ``{"image_path": "path", "caption": "caption", "outbound_url": "url"}``, only ``"image_path"`` is required. :param collection_id: The UUID of a :class:`.Collection` to add the newly-submitted post to. :param discussion_type: Set to ``CHAT`` to enable live discussion instead of traditional comments (default: None). :param flair_id: The flair template to select (default: None). :param flair_text: If the template's ``flair_text_editable`` value is True, this value will set a custom text (default: None). ``flair_id`` is required when ``flair_text`` is provided. :param nsfw: Whether or not the submission should be marked NSFW (default: False). :param send_replies: When True, messages will be sent to the submission author when comments are made to the submission (default: True). :param spoiler: Whether or not the submission should be marked as a spoiler (default: False). :returns: A :class:`.Submission` object for the newly created submission. :raises: :class:`.ClientException` if ``image_path`` in ``images`` refers to a file that is not an image. For example, to submit an image gallery to ``r/reddit_api_test`` do: .. code-block:: python title = "My favorite pictures" image = "/path/to/image.png" image2 = "/path/to/image2.png" image3 = "/path/to/image3.png" images = [ {"image_path": image}, { "image_path": image2, "caption": "Image caption 2", }, { "image_path": image3, "caption": "Image caption 3", "outbound_url": "https://example.com/link3", }, ] reddit.subreddit("reddit_api_test").submit_gallery(title, images) .. seealso:: - :meth:`.submit` to submit url posts and selftexts - :meth:`.submit_image` to submit single images - :meth:`.submit_poll` to submit polls - :meth:`.submit_video` 
to submit videos and videogifs """ self._validate_gallery(images) data = { "api_type": "json", "items": [], "nsfw": bool(nsfw), "sendreplies": bool(send_replies), "show_error_list": True, "spoiler": bool(spoiler), "sr": str(self), "title": title, "validate_on_submit": self._reddit.validate_on_submit, } for key, value in ( ("flair_id", flair_id), ("flair_text", flair_text), ("collection_id", collection_id), ("discussion_type", discussion_type), ): if value is not None: data[key] = value for image in images: data["items"].append( { "caption": image.get("caption", ""), "outbound_url": image.get("outbound_url", ""), "media_id": self._upload_media( image["image_path"], expected_mime_prefix="image", upload_type="gallery", )[0], } ) response = self._reddit.request( "POST", API_PATH["submit_gallery_post"], json=data )["json"] if response["errors"]: raise RedditAPIException(response["errors"]) else: return self._reddit.submission(url=response["data"]["url"]) def submit_image( self, title: str, image_path: str, flair_id: Optional[str] = None, flair_text: Optional[str] = None, resubmit: bool = True, send_replies: bool = True, nsfw: bool = False, spoiler: bool = False, timeout: int = 10, collection_id: Optional[str] = None, without_websockets: bool = False, discussion_type: Optional[str] = None, ): """Add an image submission to the subreddit. :param title: The title of the submission. :param image_path: The path to an image, to upload and post. :param collection_id: The UUID of a :class:`.Collection` to add the newly-submitted post to. :param flair_id: The flair template to select (default: None). :param flair_text: If the template's ``flair_text_editable`` value is True, this value will set a custom text (default: None). ``flair_id`` is required when ``flair_text`` is provided. :param resubmit: When False, an error will occur if the URL has already been submitted (default: True). :param send_replies: When True, messages will be sent to the submission author when comments are made to the submission (default: True). :param nsfw: Whether or not the submission should be marked NSFW (default: False). :param spoiler: Whether or not the submission should be marked as a spoiler (default: False). :param timeout: Specifies a particular timeout, in seconds. Use to avoid "Websocket error" exceptions (default: 10). :param without_websockets: Set to ``True`` to disable use of WebSockets (see note below for an explanation). If ``True``, this method doesn't return anything. (default: ``False``). :param discussion_type: Set to ``CHAT`` to enable live discussion instead of traditional comments (default: None). :returns: A :class:`.Submission` object for the newly created submission, unless ``without_websockets`` is ``True``. :raises: :class:`.ClientException` if ``image_path`` refers to a file that is not an image. .. note:: Reddit's API uses WebSockets to respond with the link of the newly created post. If this fails, the method will raise :class:`.WebSocketException`. Occasionally, the Reddit post will still be created. More often, there is an error with the image file. If you frequently get exceptions but successfully created posts, try setting the ``timeout`` parameter to a value above 10. To disable the use of WebSockets, set ``without_websockets=True``. This will make the method return ``None``, though the post will still be created. You may wish to do this if you are running your program in a restricted network environment, or using a proxy that doesn't support WebSockets connections. 
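As a hedged sketch of recovering from that failure mode (the ``try``/``except`` pattern and the recovery step below are illustrative only, not part of PRAW's documented API; they assume an authenticated ``reddit`` instance and a local image file):

.. code-block:: python

    from praw.exceptions import WebSocketException

    try:
        submission = reddit.subreddit("test").submit_image("title", "/path/to/image.png")
    except WebSocketException:
        # The post may still have been created; inspect the authenticated
        # user's newest submission before assuming failure and retrying.
        submission = next(reddit.user.me().submissions.new(limit=1))
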
For example, to submit an image to ``r/reddit_api_test`` do: .. code-block:: python title = "My favorite picture" image = "/path/to/image.png" reddit.subreddit("reddit_api_test").submit_image(title, image) .. seealso:: - :meth:`.submit` to submit url posts and selftexts - :meth:`.submit_video` to submit videos and videogifs - :meth:`.submit_gallery` to submit more than one image in the same post """ data = { "sr": str(self), "resubmit": bool(resubmit), "sendreplies": bool(send_replies), "title": title, "nsfw": bool(nsfw), "spoiler": bool(spoiler), "validate_on_submit": self._reddit.validate_on_submit, } for key, value in ( ("flair_id", flair_id), ("flair_text", flair_text), ("collection_id", collection_id), ("discussion_type", discussion_type), ): if value is not None: data[key] = value image_url, websocket_url = self._upload_media( image_path, expected_mime_prefix="image" ) data.update(kind="image", url=image_url) if without_websockets: websocket_url = None return self._submit_media( data, timeout, websocket_url=websocket_url, ) def submit_poll( self, title: str, selftext: str, options: List[str], duration: int, flair_id: Optional[str] = None, flair_text: Optional[str] = None, resubmit: bool = True, send_replies: bool = True, nsfw: bool = False, spoiler: bool = False, collection_id: Optional[str] = None, discussion_type: Optional[str] = None, ): """Add a poll submission to the subreddit. :param title: The title of the submission. :param selftext: The Markdown formatted content for the submission. Use an empty string, ``""``, to make a submission with no text contents. :param options: A ``list`` of two to six poll options as ``str``. :param duration: The number of days the poll should accept votes, as an ``int``. Valid values are between ``1`` and ``7``, inclusive. :param collection_id: The UUID of a :class:`.Collection` to add the newly-submitted post to. :param flair_id: The flair template to select (default: None). :param flair_text: If the template's ``flair_text_editable`` value is True, this value will set a custom text (default: None). ``flair_id`` is required when ``flair_text`` is provided. :param resubmit: When False, an error will occur if the URL has already been submitted (default: True). :param send_replies: When True, messages will be sent to the submission author when comments are made to the submission (default: True). :param nsfw: Whether or not the submission should be marked NSFW (default: False). :param spoiler: Whether or not the submission should be marked as a spoiler (default: False). :param discussion_type: Set to ``CHAT`` to enable live discussion instead of traditional comments (default: None). :returns: A :class:`~.Submission` object for the newly created submission. For example, to submit a poll to ``r/reddit_api_test`` do: .. code-block:: python title = "Do you like PRAW?" 
reddit.subreddit("reddit_api_test").submit_poll( title, selftext="", options=["Yes", "No"], duration=3 ) """ data = { "sr": str(self), "text": selftext, "options": options, "duration": duration, "resubmit": bool(resubmit), "sendreplies": bool(send_replies), "title": title, "nsfw": bool(nsfw), "spoiler": bool(spoiler), "validate_on_submit": self._reddit.validate_on_submit, } for key, value in ( ("flair_id", flair_id), ("flair_text", flair_text), ("collection_id", collection_id), ("discussion_type", discussion_type), ): if value is not None: data[key] = value return self._reddit.post(API_PATH["submit_poll_post"], json=data) def submit_video( self, title: str, video_path: str, videogif: bool = False, thumbnail_path: Optional[str] = None, flair_id: Optional[str] = None, flair_text: Optional[str] = None, resubmit: bool = True, send_replies: bool = True, nsfw: bool = False, spoiler: bool = False, timeout: int = 10, collection_id: Optional[str] = None, without_websockets: bool = False, discussion_type: Optional[str] = None, ): """Add a video or videogif submission to the subreddit. :param title: The title of the submission. :param video_path: The path to a video, to upload and post. :param videogif: A ``bool`` value. If ``True``, the video is uploaded as a videogif, which is essentially a silent video (default: ``False``). :param thumbnail_path: (Optional) The path to an image, to be uploaded and used as the thumbnail for this video. If not provided, the PRAW logo will be used as the thumbnail. :param collection_id: The UUID of a :class:`.Collection` to add the newly-submitted post to. :param flair_id: The flair template to select (default: ``None``). :param flair_text: If the template's ``flair_text_editable`` value is True, this value will set a custom text (default: ``None``). ``flair_id`` is required when ``flair_text`` is provided. :param resubmit: When False, an error will occur if the URL has already been submitted (default: ``True``). :param send_replies: When True, messages will be sent to the submission author when comments are made to the submission (default: ``True``). :param nsfw: Whether or not the submission should be marked NSFW (default: False). :param spoiler: Whether or not the submission should be marked as a spoiler (default: False). :param timeout: Specifies a particular timeout, in seconds. Use to avoid "Websocket error" exceptions (default: 10). :param without_websockets: Set to ``True`` to disable use of WebSockets (see note below for an explanation). If ``True``, this method doesn't return anything. (default: ``False``). :param discussion_type: Set to ``CHAT`` to enable live discussion instead of traditional comments (default: None). :returns: A :class:`.Submission` object for the newly created submission, unless ``without_websockets`` is ``True``. :raises: :class:`.ClientException` if ``video_path`` refers to a file that is not a video. .. note:: Reddit's API uses WebSockets to respond with the link of the newly created post. If this fails, the method will raise :class:`.WebSocketException`. Occasionally, the Reddit post will still be created. More often, there is an error with the image file. If you frequently get exceptions but successfully created posts, try setting the ``timeout`` parameter to a value above 10. To disable the use of WebSockets, set ``without_websockets=True``. This will make the method return ``None``, though the post will still be created. 
You may wish to do this if you are running your program in a restricted network environment, or using a proxy that doesn't support WebSockets connections. For example, to submit a video to ``r/reddit_api_test`` do: .. code-block:: python title = "My favorite movie" video = "/path/to/video.mp4" reddit.subreddit("reddit_api_test").submit_video(title, video) .. seealso:: - :meth:`.submit` to submit url posts and selftexts - :meth:`.submit_image` to submit images - :meth:`.submit_gallery` to submit more than one image in the same post """ data = { "sr": str(self), "resubmit": bool(resubmit), "sendreplies": bool(send_replies), "title": title, "nsfw": bool(nsfw), "spoiler": bool(spoiler), "validate_on_submit": self._reddit.validate_on_submit, } for key, value in ( ("flair_id", flair_id), ("flair_text", flair_text), ("collection_id", collection_id), ("discussion_type", discussion_type), ): if value is not None: data[key] = value video_url, websocket_url = self._upload_media( video_path, expected_mime_prefix="video" ) data.update( kind="videogif" if videogif else "video", url=video_url, # if thumbnail_path is None, it uploads the PRAW logo video_poster_url=self._upload_media(thumbnail_path)[0], ) if without_websockets: websocket_url = None return self._submit_media( data, timeout, websocket_url=websocket_url, ) def subscribe( self, other_subreddits: Optional[List["praw.models.Subreddit"]] = None ): """Subscribe to the subreddit. :param other_subreddits: When provided, also subscribe to the provided list of subreddits. For example, to subscribe to ``r/test``: .. code-block:: python reddit.subreddit("test").subscribe() """ data = { "action": "sub", "skip_initial_defaults": True, "sr_name": self._subreddit_list(self, other_subreddits), } self._reddit.post(API_PATH["subscribe"], data=data) def traffic(self) -> Dict[str, List[List[int]]]: """Return a dictionary of the subreddit's traffic statistics. :raises: ``prawcore.NotFound`` when the traffic stats aren't available to the authenticated user, that is, they are not public and the authenticated user is not a moderator of the subreddit. The traffic method returns a dict with three keys. The keys are ``day``, ``hour`` and ``month``. Each key contains a list of lists with 3 or 4 values. The first value is a timestamp indicating the start of the category (start of the day for the ``day`` key, start of the hour for the ``hour`` key, etc.). The second, third, and fourth values indicate the unique pageviews, total pageviews, and subscribers, respectively. For example, a ``day`` entry of ``[1609459200, 120, 450, 3]`` would indicate 120 unique pageviews, 450 total pageviews, and 3 subscribers for the day starting at Unix time ``1609459200``. .. note:: The ``hour`` key does not contain subscribers, and therefore each sub-list contains three values. For example, to get the traffic stats for ``r/test``: .. code-block:: python stats = reddit.subreddit("test").traffic() """ return self._reddit.get(API_PATH["about_traffic"].format(subreddit=self)) def unsubscribe( self, other_subreddits: Optional[List["praw.models.Subreddit"]] = None ): """Unsubscribe from the subreddit. :param other_subreddits: When provided, also unsubscribe from the provided list of subreddits. To unsubscribe from ``r/test``: .. code-block:: python reddit.subreddit("test").unsubscribe() """ data = { "action": "unsub", "sr_name": self._subreddit_list(self, other_subreddits), } self._reddit.post(API_PATH["subscribe"], data=data) WidgetEncoder._subreddit_class = Subreddit class SubredditFilters: """Provide functions to interact with the special Subreddit's filters. Members of this class should be utilized via ``Subreddit.filters``. For example, to add a filter, run: .. 
code-block:: python reddit.subreddit("all").filters.add("subreddit_name") """ def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditFilters instance. :param subreddit: The special subreddit whose filters to work with. As of this writing filters can only be used with the special subreddits ``all`` and ``mod``. """ self.subreddit = subreddit def __iter__(self) -> Generator["praw.models.Subreddit", None, None]: """Iterate through the special subreddit's filters. This method should be invoked as: .. code-block:: python for subreddit in reddit.subreddit("NAME").filters: ... """ url = API_PATH["subreddit_filter_list"].format( special=self.subreddit, user=self.subreddit._reddit.user.me() ) params = {"unique": self.subreddit._reddit._next_unique} response_data = self.subreddit._reddit.get(url, params=params) for subreddit in response_data.subreddits: yield subreddit def add(self, subreddit: Union["praw.models.Subreddit", str]): """Add ``subreddit`` to the list of filtered subreddits. :param subreddit: The subreddit to add to the filter list. Items from subreddits added to the filtered list will no longer be included when obtaining listings for ``r/all``. Alternatively, you can filter a subreddit temporarily from a special listing in a manner like so: .. code-block:: python reddit.subreddit("all-redditdev-learnpython") :raises: ``prawcore.NotFound`` when calling on a non-special subreddit. """ url = API_PATH["subreddit_filter"].format( special=self.subreddit, user=self.subreddit._reddit.user.me(), subreddit=subreddit, ) self.subreddit._reddit.put(url, data={"model": dumps({"name": str(subreddit)})}) def remove(self, subreddit: Union["praw.models.Subreddit", str]): """Remove ``subreddit`` from the list of filtered subreddits. :param subreddit: The subreddit to remove from the filter list. :raises: ``prawcore.NotFound`` when calling on a non-special subreddit. """ url = API_PATH["subreddit_filter"].format( special=self.subreddit, user=self.subreddit._reddit.user.me(), subreddit=str(subreddit), ) self.subreddit._reddit.delete(url) class SubredditFlair: """Provide a set of functions to interact with a Subreddit's flair.""" @cachedproperty def link_templates( self, ) -> "praw.models.reddit.subreddit.SubredditLinkFlairTemplates": """Provide an instance of :class:`.SubredditLinkFlairTemplates`. Use this attribute for interacting with a subreddit's link flair templates. For example to list all the link flair templates for a subreddit which you have the ``flair`` moderator permission on try: .. code-block:: python for template in reddit.subreddit("NAME").flair.link_templates: print(template) """ return SubredditLinkFlairTemplates(self.subreddit) @cachedproperty def templates( self, ) -> "praw.models.reddit.subreddit.SubredditRedditorFlairTemplates": """Provide an instance of :class:`.SubredditRedditorFlairTemplates`. Use this attribute for interacting with a subreddit's flair templates. For example to list all the flair templates for a subreddit which you have the ``flair`` moderator permission on try: .. code-block:: python for template in reddit.subreddit("NAME").flair.templates: print(template) """ return SubredditRedditorFlairTemplates(self.subreddit) def __call__( self, redditor: Optional[Union["praw.models.Redditor", str]] = None, **generator_kwargs: Any, ) -> Iterator["praw.models.Redditor"]: """Return a :class:`.ListingGenerator` for Redditors and their flairs. :param redditor: When provided, yield at most a single :class:`~.Redditor` instance (default: None). 
Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. Usage: .. code-block:: python for flair in reddit.subreddit("NAME").flair(limit=None): print(flair) """ Subreddit._safely_add_arguments(generator_kwargs, "params", name=redditor) generator_kwargs.setdefault("limit", None) url = API_PATH["flairlist"].format(subreddit=self.subreddit) return ListingGenerator(self.subreddit._reddit, url, **generator_kwargs) def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditFlair instance. :param subreddit: The subreddit whose flair to work with. """ self.subreddit = subreddit def configure( self, position: str = "right", self_assign: bool = False, link_position: str = "left", link_self_assign: bool = False, **settings: Any, ): """Update the subreddit's flair configuration. :param position: One of left, right, or False to disable (default: right). :param self_assign: (boolean) Permit self assignment of user flair (default: False). :param link_position: One of left, right, or False to disable (default: left). :param link_self_assign: (boolean) Permit self assignment of link flair (default: False). Additional keyword arguments can be provided to handle new settings as Reddit introduces them. """ data = { "flair_enabled": bool(position), "flair_position": position or "right", "flair_self_assign_enabled": self_assign, "link_flair_position": link_position or "", "link_flair_self_assign_enabled": link_self_assign, } data.update(settings) url = API_PATH["flairconfig"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def delete(self, redditor: Union["praw.models.Redditor", str]): """Delete flair for a Redditor. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. .. seealso:: :meth:`~praw.models.reddit.subreddit.SubredditFlair.update` to delete the flair of many Redditors at once. """ url = API_PATH["deleteflair"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data={"name": str(redditor)}) def delete_all(self) -> List[Dict[str, Union[str, bool, Dict[str, str]]]]: """Delete all Redditor flair in the Subreddit. :returns: List of dictionaries indicating the success or failure of each delete. """ return self.update(x["user"] for x in self()) def set( self, redditor: Union["praw.models.Redditor", str], text: str = "", css_class: str = "", flair_template_id: Optional[str] = None, ): """Set flair for a Redditor. :param redditor: (Required) A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. :param text: The flair text to associate with the Redditor or Submission (default: ""). :param css_class: The css class to associate with the flair html (default: ""). Use either this or ``flair_template_id``. :param flair_template_id: The ID of the flair template to be used (default: ``None``). Use either this or ``css_class``. This method can only be used by an authenticated user who is a moderator of the associated Subreddit. For example: .. code-block:: python reddit.subreddit("redditdev").flair.set("bboe", "PRAW author", css_class="mods") template = "6bd28436-1aa7-11e9-9902-0e05ab0fad46" reddit.subreddit("redditdev").flair.set( "spez", "Reddit CEO", flair_template_id=template ) """ if css_class and flair_template_id is not None: raise TypeError( "Parameter `css_class` cannot be used in conjunction with" " `flair_template_id`." 
) data = {"name": str(redditor), "text": text} if flair_template_id is not None: data["flair_template_id"] = flair_template_id url = API_PATH["select_flair"].format(subreddit=self.subreddit) else: data["css_class"] = css_class url = API_PATH["flair"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def update( self, flair_list: Iterator[ Union[ str, "praw.models.Redditor", Dict[str, Union[str, "praw.models.Redditor"]], ] ], text: str = "", css_class: str = "", ) -> List[Dict[str, Union[str, bool, Dict[str, str]]]]: """Set or clear the flair for many Redditors at once. :param flair_list: Each item in this list should be either: the name of a Redditor, an instance of :class:`.Redditor`, or a dictionary mapping keys ``user``, ``flair_text``, and ``flair_css_class`` to their respective values. The ``user`` key should map to a Redditor, as described above. When a dictionary isn't provided, or the dictionary is missing one of the ``flair_text`` or ``flair_css_class`` attributes, the default values will come from the following arguments. :param text: The flair text to use when not explicitly provided in ``flair_list`` (default: ""). :param css_class: The css class to use when not explicitly provided in ``flair_list`` (default: ""). :returns: List of dictionaries indicating the success or failure of each update. For example, to clear the flair text, and set the ``praw`` flair css class on a few users try: .. code-block:: python subreddit.flair.update(["bboe", "spez", "spladug"], css_class="praw") """ templines = StringIO() for item in flair_list: if isinstance(item, dict): writer(templines).writerow( [ str(item["user"]), item.get("flair_text", text), item.get("flair_css_class", css_class), ] ) else: writer(templines).writerow([str(item), text, css_class]) lines = templines.getvalue().splitlines() templines.close() response = [] url = API_PATH["flaircsv"].format(subreddit=self.subreddit) while lines: data = {"flair_csv": "\n".join(lines[:100])} response.extend(self.subreddit._reddit.post(url, data=data)) lines = lines[100:] return response class SubredditFlairTemplates: """Provide functions to interact with a Subreddit's flair templates.""" @staticmethod def flair_type(is_link: bool) -> str: """Return LINK_FLAIR or USER_FLAIR depending on ``is_link`` value.""" return "LINK_FLAIR" if is_link else "USER_FLAIR" def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditFlairTemplate instance. :param subreddit: The subreddit whose flair templates to work with. .. note:: This class should not be initialized directly. Instead obtain an instance via: ``reddit.subreddit("subreddit_name").flair.templates`` or ``reddit.subreddit("subreddit_name").flair.link_templates``. 
""" self.subreddit = subreddit def __iter__(self): """Abstract method to return flair templates.""" raise NotImplementedError() def _add( self, text: str, css_class: str = "", text_editable: bool = False, is_link: Optional[bool] = None, background_color: Optional[str] = None, text_color: Optional[str] = None, mod_only: Optional[bool] = None, allowable_content: Optional[str] = None, max_emojis: Optional[int] = None, ): url = API_PATH["flairtemplate_v2"].format(subreddit=self.subreddit) data = { "allowable_content": allowable_content, "background_color": background_color, "css_class": css_class, "flair_type": self.flair_type(is_link), "max_emojis": max_emojis, "mod_only": bool(mod_only), "text": text, "text_color": text_color, "text_editable": bool(text_editable), } self.subreddit._reddit.post(url, data=data) def _clear(self, is_link: Optional[bool] = None): url = API_PATH["flairtemplateclear"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data={"flair_type": self.flair_type(is_link)}) def delete(self, template_id: str): """Remove a flair template provided by ``template_id``. For example, to delete the first Redditor flair template listed, try: .. code-block:: python template_info = list(subreddit.flair.templates)[0] subreddit.flair.templates.delete(template_info["id"]) """ url = API_PATH["flairtemplatedelete"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data={"flair_template_id": template_id}) def update( self, template_id: str, text: Optional[str] = None, css_class: Optional[str] = None, text_editable: Optional[bool] = None, background_color: Optional[str] = None, text_color: Optional[str] = None, mod_only: Optional[bool] = None, allowable_content: Optional[str] = None, max_emojis: Optional[int] = None, fetch: bool = True, ): """Update the flair template provided by ``template_id``. :param template_id: The flair template to update. If not valid, an exception will be raised. :param text: The flair template's new text. :param css_class: The flair template's new css_class (default: ""). :param text_editable: (boolean) Indicate if the flair text can be modified for each Redditor that sets it (default: False). :param background_color: The flair template's new background color, as a hex color. :param text_color: The flair template's new text color, either ``"light"`` or ``"dark"``. :param mod_only: (boolean) Indicate if the flair can only be used by moderators. :param allowable_content: If specified, must be one of ``"all"``, ``"emoji"``, or ``"text"`` to restrict content to that type. If set to ``"emoji"`` then the ``"text"`` param must be a valid emoji string, for example, ``":snoo:"``. :param max_emojis: (int) Maximum emojis in the flair (Reddit defaults this value to 10). :param fetch: Whether or not PRAW will fetch existing information on the existing flair before updating (default: True). .. warning:: If parameter ``fetch`` is set to ``False``, all parameters not provided will be reset to default (``None`` or ``False``) values. For example, to make a user flair template text_editable, try: .. 
code-block:: python template_info = list(subreddit.flair.templates)[0] subreddit.flair.templates.update( template_info["id"], template_info["flair_text"], text_editable=True ) """ url = API_PATH["flairtemplate_v2"].format(subreddit=self.subreddit) data = { "allowable_content": allowable_content, "background_color": background_color, "css_class": css_class, "flair_template_id": template_id, "max_emojis": max_emojis, "mod_only": mod_only, "text": text, "text_color": text_color, "text_editable": text_editable, } if fetch: _existing_data = [ template for template in iter(self) if template["id"] == template_id ] if len(_existing_data) != 1: raise InvalidFlairTemplateID(template_id) else: existing_data = _existing_data[0] for key, value in existing_data.items(): if data.get(key) is None: data[key] = value self.subreddit._reddit.post(url, data=data) class SubredditRedditorFlairTemplates(SubredditFlairTemplates): """Provide functions to interact with Redditor flair templates.""" def __iter__( self, ) -> Generator[Dict[str, Union[str, int, bool, List[Dict[str, str]]]], None, None]: """Iterate through the user flair templates. For example: .. code-block:: python for template in reddit.subreddit("NAME").flair.templates: print(template) """ url = API_PATH["user_flair"].format(subreddit=self.subreddit) params = {"unique": self.subreddit._reddit._next_unique} for template in self.subreddit._reddit.get(url, params=params): yield template def add( self, text: str, css_class: str = "", text_editable: bool = False, background_color: Optional[str] = None, text_color: Optional[str] = None, mod_only: Optional[bool] = None, allowable_content: Optional[str] = None, max_emojis: Optional[int] = None, ): """Add a Redditor flair template to the associated subreddit. :param text: The flair template's text (required). :param css_class: The flair template's css_class (default: ""). :param text_editable: (boolean) Indicate if the flair text can be modified for each Redditor that sets it (default: False). :param background_color: The flair template's new background color, as a hex color. :param text_color: The flair template's new text color, either ``"light"`` or ``"dark"``. :param mod_only: (boolean) Indicate if the flair can only be used by moderators. :param allowable_content: If specified, must be one of ``"all"``, ``"emoji"``, or ``"text"`` to restrict content to that type. If set to ``"emoji"`` then the ``"text"`` param must be a valid emoji string, for example, ``":snoo:"``. :param max_emojis: (int) Maximum emojis in the flair (Reddit defaults this value to 10). For example, to add an editable Redditor flair try: .. code-block:: python reddit.subreddit("NAME").flair.templates.add("PRAW", css_class="praw", text_editable=True) """ self._add( text, css_class=css_class, text_editable=text_editable, is_link=False, background_color=background_color, text_color=text_color, mod_only=mod_only, allowable_content=allowable_content, max_emojis=max_emojis, ) def clear(self): """Remove all Redditor flair templates from the subreddit. For example: .. code-block:: python reddit.subreddit("NAME").flair.templates.clear() """ self._clear(is_link=False) class SubredditLinkFlairTemplates(SubredditFlairTemplates): """Provide functions to interact with link flair templates.""" def __iter__( self, ) -> Generator[Dict[str, Union[str, int, bool, List[Dict[str, str]]]], None, None]: """Iterate through the link flair templates. For example: .. 
code-block:: python for template in reddit.subreddit("NAME").flair.link_templates: print(template) """ url = API_PATH["link_flair"].format(subreddit=self.subreddit) for template in self.subreddit._reddit.get(url): yield template def add( self, text: str, css_class: str = "", text_editable: bool = False, background_color: Optional[str] = None, text_color: Optional[str] = None, mod_only: Optional[bool] = None, allowable_content: Optional[str] = None, max_emojis: Optional[int] = None, ): """Add a link flair template to the associated subreddit. :param text: The flair template's text (required). :param css_class: The flair template's css_class (default: ""). :param text_editable: (boolean) Indicate if the flair text can be modified for each Redditor that sets it (default: False). :param background_color: The flair template's new background color, as a hex color. :param text_color: The flair template's new text color, either ``"light"`` or ``"dark"``. :param mod_only: (boolean) Indicate if the flair can only be used by moderators. :param allowable_content: If specified, must be one of ``"all"``, ``"emoji"``, or ``"text"`` to restrict content to that type. If set to ``"emoji"`` then the ``"text"`` param must be a valid emoji string, for example, ``":snoo:"``. :param max_emojis: (int) Maximum emojis in the flair (Reddit defaults this value to 10). For example, to add an editable link flair try: .. code-block:: python reddit.subreddit("NAME").flair.link_templates.add("PRAW", css_class="praw", text_editable=True) """ self._add( text, css_class=css_class, text_editable=text_editable, is_link=True, background_color=background_color, text_color=text_color, mod_only=mod_only, allowable_content=allowable_content, max_emojis=max_emojis, ) def clear(self): """Remove all link flair templates from the subreddit. For example: .. code-block:: python reddit.subreddit("NAME").flair.link_templates.clear() """ self._clear(is_link=True) class SubredditModeration: """Provides a set of moderation functions to a Subreddit. For example, to accept a moderation invite from subreddit ``r/test``: .. code-block:: python reddit.subreddit("test").mod.accept_invite() """ @staticmethod def _handle_only(only: Optional[str], generator_kwargs: Dict[str, Any]): if only is not None: if only == "submissions": only = "links" RedditBase._safely_add_arguments(generator_kwargs, "params", only=only) def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditModeration instance. :param subreddit: The subreddit to moderate. """ self.subreddit = subreddit self._stream = None def accept_invite(self): """Accept an invitation as a moderator of the community.""" url = API_PATH["accept_mod_invite"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url) def edited( self, only: Optional[str] = None, **generator_kwargs: Any ) -> Iterator[Union["praw.models.Comment", "praw.models.Submission"]]: """Return a :class:`.ListingGenerator` for edited comments and submissions. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print all items in the edited queue try: .. 
code-block:: python for item in reddit.subreddit("mod").mod.edited(limit=None): print(item) """ self._handle_only(only, generator_kwargs) return ListingGenerator( self.subreddit._reddit, API_PATH["about_edited"].format(subreddit=self.subreddit), **generator_kwargs, ) def inbox( self, **generator_kwargs: Any ) -> Iterator["praw.models.SubredditMessage"]: """Return a :class:`.ListingGenerator` for moderator messages. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. .. seealso:: :meth:`~.unread` for unread moderator messages. To print the last 5 moderator mail messages and their replies, try: .. code-block:: python for message in reddit.subreddit("mod").mod.inbox(limit=5): print(f"From: {message.author}, Body: {message.body}") for reply in message.replies: print(f"From: {reply.author}, Body: {reply.body}") """ return ListingGenerator( self.subreddit._reddit, API_PATH["moderator_messages"].format(subreddit=self.subreddit), **generator_kwargs, ) def log( self, action: Optional[str] = None, mod: Optional[Union["praw.models.Redditor", str]] = None, **generator_kwargs: Any, ) -> Iterator["praw.models.ModAction"]: """Return a :class:`.ListingGenerator` for moderator log entries. :param action: If given, only return log entries for the specified action. :param mod: If given, only return log entries for actions made by the passed in Redditor. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print the moderator and subreddit of the last 5 modlog entries try: .. code-block:: python for log in reddit.subreddit("mod").mod.log(limit=5): print(f"Mod: {log.mod}, Subreddit: {log.subreddit}") """ params = {"mod": str(mod) if mod else mod, "type": action} Subreddit._safely_add_arguments(generator_kwargs, "params", **params) return ListingGenerator( self.subreddit._reddit, API_PATH["about_log"].format(subreddit=self.subreddit), **generator_kwargs, ) def modqueue( self, only: Optional[str] = None, **generator_kwargs: Any ) -> Iterator[Union["praw.models.Submission", "praw.models.Comment"]]: """Return a :class:`.ListingGenerator` for modqueue items. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print all modqueue items try: .. code-block:: python for item in reddit.subreddit("mod").mod.modqueue(limit=None): print(item) """ self._handle_only(only, generator_kwargs) return ListingGenerator( self.subreddit._reddit, API_PATH["about_modqueue"].format(subreddit=self.subreddit), **generator_kwargs, ) @cachedproperty def stream(self) -> "praw.models.reddit.subreddit.SubredditModerationStream": """Provide an instance of :class:`.SubredditModerationStream`. Streams can be used to indefinitely retrieve Moderator only items from :class:`.SubredditModeration` made to moderated subreddits, like: .. code-block:: python for log in reddit.subreddit("mod").mod.stream.log(): print(f"Mod: {log.mod}, Subreddit: {log.subreddit}") """ return SubredditModerationStream(self.subreddit) @cachedproperty def removal_reasons(self) -> SubredditRemovalReasons: """Provide an instance of :class:`.SubredditRemovalReasons`. Use this attribute for interacting with a subreddit's removal reasons. For example to list all the removal reasons for a subreddit which you have the ``posts`` moderator permission on, try: .. 
code-block:: python for removal_reason in reddit.subreddit("NAME").mod.removal_reasons: print(removal_reason) A single removal reason can be lazily retrieved via: .. code-block:: python reddit.subreddit("NAME").mod.removal_reasons["reason_id"] .. note:: Attempting to access attributes of a nonexistent removal reason will result in a :class:`.ClientException`. """ return SubredditRemovalReasons(self.subreddit) def reports( self, only: Optional[str] = None, **generator_kwargs: Any ) -> Iterator[Union["praw.models.Submission", "praw.models.Comment"]]: """Return a :class:`.ListingGenerator` for reported comments and submissions. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print the user and mod report reasons in the report queue try: .. code-block:: python for reported_item in reddit.subreddit("mod").mod.reports(): print(f"User Reports: {reported_item.user_reports}") print(f"Mod Reports: {reported_item.mod_reports}") """ self._handle_only(only, generator_kwargs) return ListingGenerator( self.subreddit._reddit, API_PATH["about_reports"].format(subreddit=self.subreddit), **generator_kwargs, ) def settings(self) -> Dict[str, Union[str, int, bool]]: """Return a dictionary of the subreddit's current settings.""" url = API_PATH["subreddit_settings"].format(subreddit=self.subreddit) return self.subreddit._reddit.get(url)["data"] def spam( self, only: Optional[str] = None, **generator_kwargs: Any ) -> Iterator[Union["praw.models.Submission", "praw.models.Comment"]]: """Return a :class:`.ListingGenerator` for spam comments and submissions. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print the items in the spam queue try: .. code-block:: python for item in reddit.subreddit("mod").mod.spam(): print(item) """ self._handle_only(only, generator_kwargs) return ListingGenerator( self.subreddit._reddit, API_PATH["about_spam"].format(subreddit=self.subreddit), **generator_kwargs, ) def unmoderated( self, **generator_kwargs: Any ) -> Iterator["praw.models.Submission"]: """Return a :class:`.ListingGenerator` for unmoderated submissions. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To print the items in the unmoderated queue try: .. code-block:: python for item in reddit.subreddit("mod").mod.unmoderated(): print(item) """ return ListingGenerator( self.subreddit._reddit, API_PATH["about_unmoderated"].format(subreddit=self.subreddit), **generator_kwargs, ) def unread( self, **generator_kwargs: Any ) -> Iterator["praw.models.SubredditMessage"]: """Return a :class:`.ListingGenerator` for unread moderator messages. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. .. seealso:: :meth:`inbox` for all messages. To print the mail in the unread modmail queue try: .. code-block:: python for message in reddit.subreddit("mod").mod.unread(): print(f"From: {message.author}, To: {message.dest}") """ return ListingGenerator( self.subreddit._reddit, API_PATH["moderator_unread"].format(subreddit=self.subreddit), **generator_kwargs, ) def update( self, **settings: Union[str, int, bool] ) -> Dict[str, Union[str, int, bool]]: """Update the subreddit's settings. See https://www.reddit.com/dev/api#POST_api_site_admin for the full list. 
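As a brief, hedged sketch of typical usage (it assumes the authenticated user moderates ``r/test``; the settings shown are arbitrary examples of the parameters documented below):

.. code-block:: python

    reddit.subreddit("test").mod.update(title="Test community", over_18=False)
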
:param all_original_content: Mandate all submissions to be original content only. :param allow_chat_post_creation: Allow users to create chat submissions. :param allow_images: Allow users to upload images using the native image hosting. :param allow_polls: Allow users to post polls to the subreddit. :param allow_post_crossposts: Allow users to crosspost submissions from other subreddits. :param allow_videos: Allow users to upload videos using the native image hosting. :param collapse_deleted_comments: Collapse deleted and removed comments on comments pages by default. :param comment_score_hide_mins: The number of minutes to hide comment scores. :param content_options: The types of submissions users can make. One of ``any``, ``link``, ``self``. :param crowd_control_chat_level: Controls the crowd control level for chat rooms. Goes from 0-3. :param crowd_control_level: Controls the crowd control level for submissions. Goes from 0-3. :param crowd_control_mode: Enables/disables crowd control. :param default_set: Allow the subreddit to appear on ``r/all`` as well as the default and trending lists. :param disable_contributor_requests: Specifies whether redditors may send automated modmail messages requesting approval as a submitter. :param exclude_banned_modqueue: Exclude posts by site-wide banned users from modqueue/unmoderated. :param free_form_reports: Allow users to specify custom reasons in the report menu. :param header_hover_text: The text seen when hovering over the snoo. :param hide_ads: Don't show ads within this subreddit. Only applies to Premium-user only subreddits. :param key_color: A 6-digit rgb hex color (e.g. ``"#AABBCC"``), used as a thematic color for your subreddit on mobile. :param language: A valid IETF language tag (underscore separated). :param original_content_tag_enabled: Enables the use of the ``original content`` label for submissions. :param over_18: Viewers must be over 18 years old (i.e. NSFW). :param public_description: Public description blurb. Appears in search results and on the landing page for private subreddits. :param restrict_commenting: Specifies whether approved users have the ability to comment. :param restrict_posting: Specifies whether approved users have the ability to submit posts. :param show_media: Show thumbnails on submissions. :param show_media_preview: Expand media previews on comments pages. :param spam_comments: Spam filter strength for comments. One of ``all``, ``low``, ``high``. :param spam_links: Spam filter strength for links. One of ``all``, ``low``, ``high``. :param spam_selfposts: Spam filter strength for selfposts. One of ``all``, ``low``, ``high``. :param spoilers_enabled: Enable marking posts as containing spoilers. :param submit_link_label: Custom label for submit link button (None for default). :param submit_text: Text to show on submission page. :param submit_text_label: Custom label for submit text post button (None for default). :param subreddit_type: One of ``archived``, ``employees_only``, ``gold_only``, ``gold_restricted``, ``private``, ``public``, ``restricted``. :param suggested_comment_sort: All comment threads will use this sorting method by default. Leave None, or choose one of ``confidence``, ``controversial``, ``live``, ``new``, ``old``, ``qa``, ``random``, ``top``. :param title: The title of the subreddit. :param welcome_message_enabled: Enables the subreddit welcome message. :param welcome_message_text: The text to be used as a welcome message. A welcome message is sent to all new subscribers by a Reddit bot. 
:param wiki_edit_age: Account age, in days, required to edit and create wiki pages. :param wiki_edit_karma: Subreddit karma required to edit and create wiki pages. :param wikimode: One of ``anyone``, ``disabled``, ``modonly``. .. note:: Updating the subreddit sidebar on old reddit (``description``) is no longer supported using this method. You can update the sidebar by editing the ``"config/sidebar"`` wiki page. For example: .. code-block:: python sidebar = reddit.subreddit("test").wiki["config/sidebar"] sidebar.edit(content="new sidebar content") Additional keyword arguments can be provided to handle new settings as Reddit introduces them. Settings that are documented here and aren't explicitly set by you in a call to :meth:`.SubredditModeration.update` should retain their current value. If they do not, please file a bug. """ # These attributes come out using different names than they go in. remap = { "content_options": "link_type", "default_set": "allow_top", "header_hover_text": "header_title", "language": "lang", "subreddit_type": "type", } settings = {remap.get(key, key): value for key, value in settings.items()} settings["sr"] = self.subreddit.fullname return self.subreddit._reddit.patch(API_PATH["update_settings"], json=settings) class SubredditModerationStream: """Provides moderator streams.""" def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditModerationStream instance. :param subreddit: The moderated subreddit associated with the streams. """ self.subreddit = subreddit def edited( self, only: Optional[str] = None, **stream_options: Any ) -> Generator[Union["praw.models.Comment", "praw.models.Submission"], None, None]: """Yield edited comments and submissions as they become available. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Keyword arguments are passed to :func:`.stream_generator`. For example, to retrieve all new edited submissions/comments made to all moderated subreddits, try: .. code-block:: python for item in reddit.subreddit("mod").mod.stream.edited(): print(item) """ return stream_generator(self.subreddit.mod.edited, only=only, **stream_options) def log( self, action: Optional[str] = None, mod: Optional[Union[str, "praw.models.Redditor"]] = None, **stream_options: Any, ) -> Generator["praw.models.ModAction", None, None]: """Yield moderator log entries as they become available. :param action: If given, only return log entries for the specified action. :param mod: If given, only return log entries for actions made by the passed in Redditor. For example, to retrieve all new mod actions made to all moderated subreddits, try: .. code-block:: python for log in reddit.subreddit("mod").mod.stream.log(): print(f"Mod: {log.mod}, Subreddit: {log.subreddit}") """ return stream_generator( self.subreddit.mod.log, action=action, mod=mod, attribute_name="id", **stream_options, ) def modmail_conversations( self, other_subreddits: Optional[List["praw.models.Subreddit"]] = None, sort: Optional[str] = None, state: Optional[str] = None, **stream_options: Any, ) -> Generator[ModmailConversation, None, None]: """Yield new-modmail conversations as they become available. :param other_subreddits: A list of :class:`.Subreddit` instances for which to fetch conversations (default: None). :param sort: Can be one of: mod, recent, unread, user (default: recent). :param state: Can be one of: all, appeals, archived, default, highlighted, inbox, inprogress, mod, new, notifications (default: all).
"all" does not include mod or archived conversations. "inbox" does not include appeals conversations. Keyword arguments are passed to :func:`.stream_generator`. To print new mail in the unread modmail queue try: .. code-block:: python subreddit = reddit.subreddit("all") for message in subreddit.mod.stream.modmail_conversations(): print(f"From: {message.owner}, To: {message.participant}") """ # noqa: E501 if self.subreddit == "mod": self.subreddit = self.subreddit._reddit.subreddit("all") return stream_generator( self.subreddit.modmail.conversations, other_subreddits=other_subreddits, sort=sort, state=state, attribute_name="id", exclude_before=True, **stream_options, ) def modqueue( self, only: Optional[str] = None, **stream_options: Any ) -> Generator[Union["praw.models.Comment", "praw.models.Submission"], None, None]: """Yield comments/submissions in the modqueue as they become available. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Keyword arguments are passed to :func:`.stream_generator`. To print all new modqueue items try: .. code-block:: python for item in reddit.subreddit("mod").mod.stream.modqueue(): print(item) """ return stream_generator( self.subreddit.mod.modqueue, only=only, **stream_options ) def reports( self, only: Optional[str] = None, **stream_options: Any ) -> Generator[Union["praw.models.Comment", "praw.models.Submission"], None, None]: """Yield reported comments and submissions as they become available. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Keyword arguments are passed to :func:`.stream_generator`. To print new user and mod report reasons in the report queue try: .. code-block:: python for item in reddit.subreddit("mod").mod.stream.reports(): print(item) """ return stream_generator(self.subreddit.mod.reports, only=only, **stream_options) def spam( self, only: Optional[str] = None, **stream_options: Any ) -> Generator[Union["praw.models.Comment", "praw.models.Submission"], None, None]: """Yield spam comments and submissions as they become available. :param only: If specified, one of ``"comments"``, or ``"submissions"`` to yield only results of that type. Keyword arguments are passed to :func:`.stream_generator`. To print new items in the spam queue try: .. code-block:: python for item in reddit.subreddit("mod").mod.stream.spam(): print(item) """ return stream_generator(self.subreddit.mod.spam, only=only, **stream_options) def unmoderated( self, **stream_options: Any ) -> Generator["praw.models.Submission", None, None]: """Yield unmoderated submissions as they become available. Keyword arguments are passed to :func:`.stream_generator`. To print new items in the unmoderated queue try: .. code-block:: python for item in reddit.subreddit("mod").mod.stream.unmoderated(): print(item) """ return stream_generator(self.subreddit.mod.unmoderated, **stream_options) def unread( self, **stream_options: Any ) -> Generator["praw.models.SubredditMessage", None, None]: """Yield unread old modmail messages as they become available. Keyword arguments are passed to :func:`.stream_generator`. .. seealso:: :meth:`~.inbox` for all messages. To print new mail in the unread modmail queue try: .. 
code-block:: python for message in reddit.subreddit("mod").mod.stream.unread(): print(f"From: {message.author}, To: {message.dest}") """ return stream_generator(self.subreddit.mod.unread, **stream_options) class SubredditQuarantine: """Provides subreddit quarantine related methods. To opt-in into a quarantined subreddit: .. code-block:: python reddit.subreddit("test").quaran.opt_in() """ def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditQuarantine instance. :param subreddit: The subreddit associated with the quarantine. """ self.subreddit = subreddit def opt_in(self): """Permit your user access to the quarantined subreddit. Usage: .. code-block:: python subreddit = reddit.subreddit("QUESTIONABLE") next(subreddit.hot()) # Raises prawcore.Forbidden subreddit.quaran.opt_in() next(subreddit.hot()) # Returns Submission """ data = {"sr_name": self.subreddit} try: self.subreddit._reddit.post(API_PATH["quarantine_opt_in"], data=data) except Redirect: pass def opt_out(self): """Remove access to the quarantined subreddit. Usage: .. code-block:: python subreddit = reddit.subreddit("QUESTIONABLE") next(subreddit.hot()) # Returns Submission subreddit.quaran.opt_out() next(subreddit.hot()) # Raises prawcore.Forbidden """ data = {"sr_name": self.subreddit} try: self.subreddit._reddit.post(API_PATH["quarantine_opt_out"], data=data) except Redirect: pass class SubredditRelationship: """Represents a relationship between a redditor and subreddit. Instances of this class can be iterated through in order to discover the Redditors that make up the relationship. For example, banned users of a subreddit can be iterated through like so: .. code-block:: python for ban in reddit.subreddit("redditdev").banned(): print(f"{ban}: {ban.note}") """ def __call__( self, redditor: Optional[Union[str, "praw.models.Redditor"]] = None, **generator_kwargs, ) -> Iterator["praw.models.Redditor"]: """Return a :class:`.ListingGenerator` for Redditors in the relationship. :param redditor: When provided, yield at most a single :class:`~.Redditor` instance. This is useful to confirm if a relationship exists, or to fetch the metadata associated with a particular relationship (default: None). Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. """ Subreddit._safely_add_arguments(generator_kwargs, "params", user=redditor) url = API_PATH[f"list_{self.relationship}"].format(subreddit=self.subreddit) return ListingGenerator(self.subreddit._reddit, url, **generator_kwargs) def __init__(self, subreddit: "praw.models.Subreddit", relationship: str): """Create a SubredditRelationship instance. :param subreddit: The subreddit for the relationship. :param relationship: The name of the relationship. """ self.relationship = relationship self.subreddit = subreddit def add(self, redditor: Union[str, "praw.models.Redditor"], **other_settings: Any): """Add ``redditor`` to this relationship. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. """ data = {"name": str(redditor), "type": self.relationship} data.update(other_settings) url = API_PATH["friend"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def remove(self, redditor: Union[str, "praw.models.Redditor"]): """Remove ``redditor`` from this relationship. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. 
""" data = {"name": str(redditor), "type": self.relationship} url = API_PATH["unfriend"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) class ContributorRelationship(SubredditRelationship): """Provides methods to interact with a Subreddit's contributors. Contributors are also known as approved submitters. Contributors of a subreddit can be iterated through like so: .. code-block:: python for contributor in reddit.subreddit("redditdev").contributor(): print(contributor) """ def leave(self): """Abdicate the contributor position.""" self.subreddit._reddit.post( API_PATH["leavecontributor"], data={"id": self.subreddit.fullname} ) class ModeratorRelationship(SubredditRelationship): """Provides methods to interact with a Subreddit's moderators. Moderators of a subreddit can be iterated through like so: .. code-block:: python for moderator in reddit.subreddit("redditdev").moderator(): print(moderator) """ PERMISSIONS = {"access", "config", "flair", "mail", "posts", "wiki"} @staticmethod def _handle_permissions(permissions: List[str], other_settings: dict): other_settings = deepcopy(other_settings) if other_settings else {} other_settings["permissions"] = permissions_string( permissions, ModeratorRelationship.PERMISSIONS ) return other_settings def __call__( self, redditor: Optional[Union[str, "praw.models.Redditor"]] = None ) -> List["praw.models.Redditor"]: # pylint: disable=arguments-differ """Return a list of Redditors who are moderators. :param redditor: When provided, return a list containing at most one :class:`~.Redditor` instance. This is useful to confirm if a relationship exists, or to fetch the metadata associated with a particular relationship (default: None). .. note:: Unlike other relationship callables, this relationship is not paginated. Thus it simply returns the full list, rather than an iterator for the results. To be used like: .. code-block:: python moderators = reddit.subreddit("nameofsub").moderator() For example, to list the moderators along with their permissions try: .. code-block:: python for moderator in reddit.subreddit("SUBREDDIT").moderator(): print(f"{moderator}: {moderator.mod_permissions}") """ params = {} if redditor is None else {"user": redditor} url = API_PATH[f"list_{self.relationship}"].format(subreddit=self.subreddit) return self.subreddit._reddit.get(url, params=params) # pylint: disable=arguments-differ def add( self, redditor: Union[str, "praw.models.Redditor"], permissions: Optional[List[str]] = None, **other_settings: Any, ): """Add or invite ``redditor`` to be a moderator of the subreddit. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. :param permissions: When provided (not ``None``), permissions should be a list of strings specifying which subset of permissions to grant. An empty list ``[]`` indicates no permissions, and when not provided ``None``, indicates full permissions. An invite will be sent unless the user making this call is an admin user. For example, to invite ``"spez"`` with ``"posts"`` and ``"mail"`` permissions to ``r/test``, try: .. code-block:: python reddit.subreddit("test").moderator.add("spez", ["posts", "mail"]) """ other_settings = self._handle_permissions(permissions, other_settings) super().add(redditor, **other_settings) # pylint: enable=arguments-differ def invite( self, redditor: Union[str, "praw.models.Redditor"], permissions: Optional[List[str]] = None, **other_settings: Any, ): """Invite ``redditor`` to be a moderator of the subreddit. 
:param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. :param permissions: When provided (not ``None``), permissions should be a list of strings specifying which subset of permissions to grant. An empty list ``[]`` indicates no permissions, and when not provided ``None``, indicates full permissions. For example, to invite ``"spez"`` with ``posts`` and ``mail`` permissions to ``r/test``, try: .. code-block:: python reddit.subreddit("test").moderator.invite("spez", ["posts", "mail"]) """ data = self._handle_permissions(permissions, other_settings) data.update({"name": str(redditor), "type": "moderator_invite"}) url = API_PATH["friend"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def invited( self, redditor: Optional[Union[str, "praw.models.Redditor"]] = None, **generator_kwargs: Any, ) -> Iterator["praw.models.Redditor"]: """Return a :class:`.ListingGenerator` for Redditors invited to be moderators. :param redditor: When provided, return a list containing at most one :class:`~.Redditor` instance. This is useful to confirm if a relationship exists, or to fetch the metadata associated with a particular relationship (default: None). Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. .. note:: Unlike other usages of :class:`.ListingGenerator`, ``limit`` has no effect in the quantity returned. This endpoint always returns moderators in batches of 25 at a time regardless of what ``limit`` is set to. Usage: .. code-block:: python for invited_mod in reddit.subreddit("NAME").moderator.invited(): print(invited_mod) """ generator_kwargs["params"] = {"username": redditor} if redditor else None url = API_PATH["list_invited_moderator"].format(subreddit=self.subreddit) return ListingGenerator(self.subreddit._reddit, url, **generator_kwargs) def leave(self): """Abdicate the moderator position (use with care). For example: .. code-block:: python reddit.subreddit("subredditname").moderator.leave() """ self.remove( self.subreddit._reddit.config.username or self.subreddit._reddit.user.me() ) def remove_invite(self, redditor: Union[str, "praw.models.Redditor"]): """Remove the moderator invite for ``redditor``. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. For example: .. code-block:: python reddit.subreddit("subredditname").moderator.remove_invite("spez") """ data = {"name": str(redditor), "type": "moderator_invite"} url = API_PATH["unfriend"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def update( self, redditor: Union[str, "praw.models.Redditor"], permissions: Optional[List[str]] = None, ): """Update the moderator permissions for ``redditor``. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. :param permissions: When provided (not ``None``), permissions should be a list of strings specifying which subset of permissions to grant. An empty list ``[]`` indicates no permissions, and when not provided, ``None``, indicates full permissions. For example, to add all permissions to the moderator, try: .. code-block:: python subreddit.moderator.update("spez") To remove all permissions from the moderator, try: .. 
code-block:: python subreddit.moderator.update("spez", []) """ url = API_PATH["setpermissions"].format(subreddit=self.subreddit) data = self._handle_permissions( permissions, {"name": str(redditor), "type": "moderator"} ) self.subreddit._reddit.post(url, data=data) def update_invite( self, redditor: Union[str, "praw.models.Redditor"], permissions: Optional[List[str]] = None, ): """Update the moderator invite permissions for ``redditor``. :param redditor: A redditor name (e.g., ``"spez"``) or :class:`~.Redditor` instance. :param permissions: When provided (not ``None``), permissions should be a list of strings specifying which subset of permissions to grant. An empty list ``[]`` indicates no permissions, and when not provided, ``None``, indicates full permissions. For example, to grant the ``flair`` and ``mail`` permissions to the moderator invite, try: .. code-block:: python subreddit.moderator.update_invite("spez", ["flair", "mail"]) """ url = API_PATH["setpermissions"].format(subreddit=self.subreddit) data = self._handle_permissions( permissions, {"name": str(redditor), "type": "moderator_invite"} ) self.subreddit._reddit.post(url, data=data) class Modmail: """Provides modmail functions for a subreddit. For example, to send a new modmail from the subreddit ``r/test`` to user ``u/spez`` with the subject ``test`` along with a message body of ``hello``: .. code-block:: python reddit.subreddit("test").modmail.create("test", "hello", "spez") """ def __call__( self, id: Optional[str] = None, mark_read: bool = False ): # noqa: D207, D301 """Return an individual conversation. :param id: A reddit base36 conversation ID, e.g., ``2gmz``. :param mark_read: If True, conversation is marked as read (default: False). For example: .. code-block:: python reddit.subreddit("redditdev").modmail("2gmz", mark_read=True) To print all messages from a conversation as Markdown source: .. code-block:: python conversation = reddit.subreddit("redditdev").modmail("2gmz", mark_read=True) for message in conversation.messages: print(message.body_markdown) ``ModmailConversation.user`` is a special instance of :class:`.Redditor` with extra attributes describing the non-moderator user's recent posts, comments, and modmail messages within the subreddit, as well as information on active bans and mutes. This attribute does not exist on internal moderator discussions. For example, to print the user's ban status: .. code-block:: python conversation = reddit.subreddit("redditdev").modmail("2gmz", mark_read=True) print(conversation.user.ban_status) To print a list of recent submissions by the user: .. code-block:: python conversation = reddit.subreddit("redditdev").modmail("2gmz", mark_read=True) print(conversation.user.recent_posts) """ # pylint: disable=invalid-name,redefined-builtin return ModmailConversation(self.subreddit._reddit, id=id, mark_read=mark_read) def __init__(self, subreddit: "praw.models.Subreddit"): """Construct an instance of the Modmail object.""" self.subreddit = subreddit def _build_subreddit_list( self, other_subreddits: Optional[List["praw.models.Subreddit"]] ): """Return a comma-separated list of subreddit display names.""" subreddits = [self.subreddit] + (other_subreddits or []) return ",".join(str(subreddit) for subreddit in subreddits) def bulk_read( self, other_subreddits: Optional[List[Union["praw.models.Subreddit", str]]] = None, state: Optional[str] = None, ) -> List[ModmailConversation]: """Mark conversations for subreddit(s) as read.
Due to server-side restrictions, "all" is not a valid subreddit for this method. Instead, use :meth:`~.Modmail.subreddits` to get a list of subreddits using the new modmail. :param other_subreddits: A list of :class:`.Subreddit` instances for which to mark conversations (default: None). :param state: Can be one of: all, archived, highlighted, inprogress, mod, new, notifications, or appeals (default: all). "all" does not include internal, archived, or appeals conversations. :returns: A list of :class:`.ModmailConversation` instances that were marked read. For example, to mark all notifications for a subreddit as read: .. code-block:: python subreddit = reddit.subreddit("redditdev") subreddit.modmail.bulk_read(state="notifications") """ params = {"entity": self._build_subreddit_list(other_subreddits)} if state: params["state"] = state response = self.subreddit._reddit.post( API_PATH["modmail_bulk_read"], params=params ) return [ self(conversation_id) for conversation_id in response["conversation_ids"] ] def conversations( self, after: Optional[str] = None, limit: Optional[int] = None, other_subreddits: Optional[List["praw.models.Subreddit"]] = None, sort: Optional[str] = None, state: Optional[str] = None, ) -> Generator[ModmailConversation, None, None]: # noqa: D207, D301 """Generate :class:`.ModmailConversation` objects for subreddit(s). :param after: A base36 modmail conversation id. When provided, the listing begins after this conversation (default: None). :param limit: The maximum number of conversations to fetch. If None, the server-side default is 25 at the time of writing (default: None). :param other_subreddits: A list of :class:`.Subreddit` instances for which to fetch conversations (default: None). :param sort: Can be one of: mod, recent, unread, user (default: recent). :param state: Can be one of: all, archived, highlighted, inprogress, mod, new, notifications, or appeals (default: all). "all" does not include internal, archived, or appeals conversations. For example: .. code-block:: python conversations = reddit.subreddit("all").modmail.conversations(state="mod") """ params = {} if self.subreddit != "all": params["entity"] = self._build_subreddit_list(other_subreddits) for name, value in { "after": after, "limit": limit, "sort": sort, "state": state, }.items(): if value: params[name] = value response = self.subreddit._reddit.get( API_PATH["modmail_conversations"], params=params ) for conversation_id in response["conversationIds"]: data = { "conversation": response["conversations"][conversation_id], "messages": response["messages"], } yield ModmailConversation.parse( data, self.subreddit._reddit, convert_objects=False ) def create( self, subject: str, body: str, recipient: Union[str, "praw.models.Redditor"], author_hidden: bool = False, ) -> ModmailConversation: """Create a new modmail conversation. :param subject: The message subject. Cannot be empty. :param body: The message body. Cannot be empty. :param recipient: The recipient; a username or an instance of :class:`.Redditor`. :param author_hidden: When True, author is hidden from non-moderators (default: False). :returns: A :class:`.ModmailConversation` object for the newly created conversation. ..
code-block:: python subreddit = reddit.subreddit("redditdev") redditor = reddit.redditor("bboe") subreddit.modmail.create("Subject", "Body", redditor) """ data = { "body": body, "isAuthorHidden": author_hidden, "srName": self.subreddit, "subject": subject, "to": recipient, } return self.subreddit._reddit.post(API_PATH["modmail_conversations"], data=data) def subreddits(self) -> Generator["praw.models.Subreddit", None, None]: """Yield subreddits using the new modmail that the user moderates. For example: .. code-block:: python subreddits = reddit.subreddit("all").modmail.subreddits() """ response = self.subreddit._reddit.get(API_PATH["modmail_subreddits"]) for value in response["subreddits"].values(): subreddit = self.subreddit._reddit.subreddit(value["display_name"]) subreddit.last_updated = value["lastUpdated"] yield subreddit def unread_count(self) -> Dict[str, int]: """Return unread conversation count by conversation state. At time of writing, possible states are: archived, highlighted, inprogress, mod, new, notifications, or appeals. :returns: A dict mapping conversation states to unread counts. For example, to print the count of unread moderator discussions: .. code-block:: python subreddit = reddit.subreddit("redditdev") unread_counts = subreddit.modmail.unread_count() print(unread_counts["mod"]) """ return self.subreddit._reddit.get(API_PATH["modmail_unread_count"]) class SubredditStream: """Provides submission and comment streams.""" def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditStream instance. :param subreddit: The subreddit associated with the streams. """ self.subreddit = subreddit def comments( self, **stream_options: Any ) -> Generator["praw.models.Comment", None, None]: """Yield new comments as they become available. Comments are yielded oldest first. Up to 100 historical comments will initially be returned. Keyword arguments are passed to :func:`.stream_generator`. .. note:: While PRAW tries to catch all new comments, some high-volume streams, especially the r/all stream, may drop some comments. For example, to retrieve all new comments made to the ``iama`` subreddit, try: .. code-block:: python for comment in reddit.subreddit("iama").stream.comments(): print(comment) To only retrieve new comments starting when the stream is created, pass ``skip_existing=True``: .. code-block:: python subreddit = reddit.subreddit("iama") for comment in subreddit.stream.comments(skip_existing=True): print(comment) """ return stream_generator(self.subreddit.comments, **stream_options) def submissions( self, **stream_options: Any ) -> Generator["praw.models.Submission", None, None]: """Yield new submissions as they become available. Submissions are yielded oldest first. Up to 100 historical submissions will initially be returned. Keyword arguments are passed to :func:`.stream_generator`. .. note:: While PRAW tries to catch all new submissions, some high-volume streams, especially the r/all stream, may drop some submissions. For example, to retrieve all new submissions made to all of Reddit, try: .. code-block:: python for submission in reddit.subreddit("all").stream.submissions(): print(submission) """ return stream_generator(self.subreddit.new, **stream_options) class SubredditStylesheet: """Provides a set of stylesheet functions to a Subreddit. For example, to add the css data ``.test{color:blue}`` to the existing stylesheet: ..
code-block:: python subreddit = reddit.subreddit("SUBREDDIT") stylesheet = subreddit.stylesheet() stylesheet.stylesheet += ".test{color:blue}" subreddit.stylesheet.update(stylesheet.stylesheet) """ def __call__(self) -> "praw.models.Stylesheet": """Return the subreddit's stylesheet. To be used as: .. code-block:: python stylesheet = reddit.subreddit("SUBREDDIT").stylesheet() """ url = API_PATH["about_stylesheet"].format(subreddit=self.subreddit) return self.subreddit._reddit.get(url) def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditStylesheet instance. :param subreddit: The subreddit associated with the stylesheet. An instance of this class is provided as: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet """ self.subreddit = subreddit def _update_structured_styles(self, style_data: Dict[str, Union[str, Any]]): url = API_PATH["structured_styles"].format(subreddit=self.subreddit) self.subreddit._reddit.patch(url, style_data) def _upload_image( self, image_path: str, data: Dict[str, Union[str, Any]] ) -> Dict[str, Any]: with open(image_path, "rb") as image: header = image.read(len(JPEG_HEADER)) image.seek(0) data["img_type"] = "jpg" if header == JPEG_HEADER else "png" url = API_PATH["upload_image"].format(subreddit=self.subreddit) response = self.subreddit._reddit.post( url, data=data, files={"file": image} ) if response["errors"]: error_type = response["errors"][0] error_value = response.get("errors_values", [""])[0] assert error_type in [ "BAD_CSS_NAME", "IMAGE_ERROR", ], "Please file a bug with PRAW." raise RedditAPIException([[error_type, error_value, None]]) return response def _upload_style_asset(self, image_path: str, image_type: str) -> str: data = {"imagetype": image_type, "filepath": basename(image_path)} data["mimetype"] = "image/jpeg" if image_path.lower().endswith(".png"): data["mimetype"] = "image/png" url = API_PATH["style_asset_lease"].format(subreddit=self.subreddit) upload_lease = self.subreddit._reddit.post(url, data=data)["s3UploadLease"] upload_data = {item["name"]: item["value"] for item in upload_lease["fields"]} upload_url = f"https:{upload_lease['action']}" with open(image_path, "rb") as image: response = self.subreddit._reddit._core._requestor._http.post( upload_url, data=upload_data, files={"file": image} ) response.raise_for_status() return f"{upload_url}/{upload_data['key']}" def delete_banner(self): """Remove the current subreddit (redesign) banner image. Succeeds even if there is no banner image. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_banner() """ data = {"bannerBackgroundImage": ""} self._update_structured_styles(data) def delete_banner_additional_image(self): """Remove the current subreddit (redesign) banner additional image. Succeeds even if there is no additional image. Will also delete any configured hover image. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_banner_additional_image() """ data = {"bannerPositionedImage": "", "secondaryBannerPositionedImage": ""} self._update_structured_styles(data) def delete_banner_hover_image(self): """Remove the current subreddit (redesign) banner hover image. Succeeds even if there is no hover image. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_banner_hover_image() """ data = {"secondaryBannerPositionedImage": ""} self._update_structured_styles(data) def delete_header(self): """Remove the current subreddit header image. 
Succeeds even if there is no header image. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_header() """ url = API_PATH["delete_sr_header"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url) def delete_image(self, name: str): """Remove the named image from the subreddit. Succeeds even if the named image does not exist. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_image("smile") """ url = API_PATH["delete_sr_image"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data={"img_name": name}) def delete_mobile_header(self): """Remove the current subreddit mobile header. Succeeds even if there is no mobile header. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_mobile_header() """ url = API_PATH["delete_sr_header"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url) def delete_mobile_icon(self): """Remove the current subreddit mobile icon. Succeeds even if there is no mobile icon. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.delete_mobile_icon() """ url = API_PATH["delete_sr_icon"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url) def update(self, stylesheet: str, reason: Optional[str] = None): """Update the subreddit's stylesheet. :param stylesheet: The CSS for the new stylesheet. :param reason: The reason for updating the stylesheet. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.update( "p { color: green; }", "color text green" ) """ data = {"op": "save", "reason": reason, "stylesheet_contents": stylesheet} url = API_PATH["subreddit_stylesheet"].format(subreddit=self.subreddit) self.subreddit._reddit.post(url, data=data) def upload(self, name: str, image_path: str) -> Dict[str, str]: """Upload an image to the Subreddit. :param name: The name to use for the image. If an image already exists with the same name, it will be replaced. :param image_path: A path to a jpeg or png image. :returns: A dictionary containing a link to the uploaded image under the key ``img_src``. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.upload("smile", "img.png") """ return self._upload_image(image_path, {"name": name, "upload_type": "img"}) def upload_banner(self, image_path: str): """Upload an image for the subreddit's (redesign) banner image. :param image_path: A path to a jpeg or png image. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.upload_banner("banner.png") """ image_type = "bannerBackgroundImage" image_url = self._upload_style_asset(image_path, image_type) self._update_structured_styles({image_type: image_url}) def upload_banner_additional_image( self, image_path: str, align: Optional[str] = None ): """Upload an image for the subreddit's (redesign) additional image. 
:param image_path: A path to a jpeg or png image. :param align: Either ``left``, ``centered``, or ``right`` (default: ``left``). :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python subreddit = reddit.subreddit("SUBREDDIT") subreddit.stylesheet.upload_banner_additional_image("banner.png") """ alignment = {} if align is not None: if align not in {"left", "centered", "right"}: raise ValueError( "align argument must be either `left`, `centered`, or `right`" ) alignment["bannerPositionedImagePosition"] = align image_type = "bannerPositionedImage" image_url = self._upload_style_asset(image_path, image_type) style_data = {image_type: image_url} if alignment: style_data.update(alignment) self._update_structured_styles(style_data) def upload_banner_hover_image(self, image_path: str): """Upload an image for the subreddit's (redesign) banner hover image. :param image_path: A path to a jpeg or png image. Fails if the Subreddit does not have an additional image defined. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python subreddit = reddit.subreddit("SUBREDDIT") subreddit.stylesheet.upload_banner_hover_image("banner.png") """ image_type = "secondaryBannerPositionedImage" image_url = self._upload_style_asset(image_path, image_type) self._update_structured_styles({image_type: image_url}) def upload_header(self, image_path: str) -> Dict[str, str]: """Upload an image to be used as the Subreddit's header image. :param image_path: A path to a jpeg or png image. :returns: A dictionary containing a link to the uploaded image under the key ``img_src``. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.upload_header("header.png") """ return self._upload_image(image_path, {"upload_type": "header"}) def upload_mobile_header(self, image_path: str) -> Dict[str, str]: """Upload an image to be used as the Subreddit's mobile header. :param image_path: A path to a jpeg or png image. :returns: A dictionary containing a link to the uploaded image under the key ``img_src``. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.upload_mobile_header("header.png") """ return self._upload_image(image_path, {"upload_type": "banner"}) def upload_mobile_icon(self, image_path: str) -> Dict[str, str]: """Upload an image to be used as the Subreddit's mobile icon.
:param image_path: A path to a jpeg or png image. :returns: A dictionary containing a link to the uploaded image under the key ``img_src``. :raises: ``prawcore.TooLarge`` if the overall request body is too large. :raises: :class:`.RedditAPIException` if there are other issues with the uploaded image. Unfortunately the exception info might not be very specific, so try through the website with the same image to see what the problem actually might be. For example: .. code-block:: python reddit.subreddit("SUBREDDIT").stylesheet.upload_mobile_icon("icon.png") """ return self._upload_image(image_path, {"upload_type": "icon"}) class SubredditWiki: """Provides a set of wiki functions to a Subreddit.""" def __getitem__(self, page_name: str) -> WikiPage: """Lazily return the subreddit's WikiPage named ``page_name``. This method is to be used to fetch a specific wikipage, like so: .. code-block:: python wikipage = reddit.subreddit("iama").wiki["proof"] print(wikipage.content_md) """ return WikiPage(self.subreddit._reddit, self.subreddit, page_name.lower()) def __init__(self, subreddit: "praw.models.Subreddit"): """Create a SubredditWiki instance. :param subreddit: The subreddit whose wiki to work with. """ self.banned = SubredditRelationship(subreddit, "wikibanned") self.contributor = SubredditRelationship(subreddit, "wikicontributor") self.subreddit = subreddit def __iter__(self) -> Generator[WikiPage, None, None]: """Iterate through the pages of the wiki. This method is to be used to discover all wikipages for a subreddit: .. code-block:: python for wikipage in reddit.subreddit("iama").wiki: print(wikipage) """ response = self.subreddit._reddit.get( API_PATH["wiki_pages"].format(subreddit=self.subreddit), params={"unique": self.subreddit._reddit._next_unique}, ) for page_name in response["data"]: yield WikiPage(self.subreddit._reddit, self.subreddit, page_name) def create( self, name: str, content: str, reason: Optional[str] = None, **other_settings: Any, ): """Create a new wiki page. :param name: The name of the new WikiPage. This name will be normalized. :param content: The content of the new WikiPage. :param reason: (Optional) The reason for the creation. :param other_settings: Additional keyword arguments to pass. To create the wiki page ``praw_test`` in ``r/test`` try: .. code-block:: python reddit.subreddit("test").wiki.create( "praw_test", "wiki body text", reason="PRAW Test Creation" ) """ name = name.replace(" ", "_").lower() new = WikiPage(self.subreddit._reddit, self.subreddit, name) new.edit(content=content, reason=reason, **other_settings) return new def revisions( self, **generator_kwargs: Any ) -> Generator[ Dict[str, Optional[Union["praw.models.Redditor", WikiPage, str, int, bool]]], None, None, ]: """Return a :class:`.ListingGenerator` for recent wiki revisions. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. To view the recent wiki revisions in ``r/test`` try: .. code-block:: python for item in reddit.subreddit("test").wiki.revisions(): print(item) """ url = API_PATH["wiki_revisions"].format(subreddit=self.subreddit) return WikiPage._revision_generator(self.subreddit, url, generator_kwargs)
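# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the library). It ties together
# the moderation helpers defined above: reading the settings dict, toggling a
# single flag via update(), and filtering the modqueue with the ``only``
# parameter. The function name, the subreddit name "test", the assumption
# that ``reddit`` is a praw.Reddit instance authenticated as a moderator, and
# the presence of the "spoilers_enabled" key in the settings payload are all
# assumptions made for this sketch.
def _moderation_sketch(reddit):
    subreddit = reddit.subreddit("test")
    # settings() returns the raw settings mapping; its keys largely line up
    # with the keyword arguments accepted by update().
    current = subreddit.mod.settings()
    subreddit.mod.update(spoilers_enabled=not current["spoilers_enabled"])
    # Approve everything currently sitting in the modqueue, restricted to
    # submissions via the ``only`` filter documented above.
    for submission in subreddit.mod.modqueue(only="submissions", limit=None):
        submission.mod.approve()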
import itertools import logging from abc import ABC, abstractmethod, ABCMeta from copy import copy from networkx import DiGraph, topological_sort from networkx.drawing.nx_agraph import to_agraph from lib.utils import powerset, format_dict, powerdict logger = logging.getLogger("halpern_pearl") logging.basicConfig(level=logging.INFO) class Variable: def __init__(self, symbol): self.symbol = symbol def __hash__(self): return hash((type(self), self.symbol)) def __eq__(self, other): return self.symbol == other.symbol def __str__(self): return f"{self.symbol}" def __repr__(self): return self.__str__() def __lt__(self, other): return self.symbol < other.symbol class Event(ABC): @abstractmethod def entailed_by(self, causal_setting): raise NotImplementedError @abstractmethod def variables(self): raise NotImplementedError def __repr__(self): return self.__str__() class PrimitiveEvent(Event): def __init__(self, variable, value): self.variable = variable self.value = value def entailed_by(self, causal_setting): return causal_setting.values[self.variable] == self.value def variables(self): return {self.variable} def __str__(self): return f"{self.variable}={self.value}" class Negation(Event): def __init__(self, child): self.child = child def entailed_by(self, causal_setting): return not self.child.entailed_by(causal_setting) def variables(self): return self.child.variables() def __str__(self): return f"!({self.child})" class BinaryFormula(Event, metaclass=ABCMeta): def __init__(self, left_child, right_child): self.left_child = left_child self.right_child = right_child def variables(self): return self.left_child.variables() | self.right_child.variables() class Conjunction(BinaryFormula): def entailed_by(self, causal_setting): return self.left_child.entailed_by(causal_setting) and self.right_child.entailed_by(causal_setting) def __str__(self): return f"({self.left_child} & {self.right_child})" class Disjunction(BinaryFormula): def entailed_by(self, causal_setting): return self.left_child.entailed_by(causal_setting) or self.right_child.entailed_by(causal_setting) def __str__(self): return f"({self.left_child} | {self.right_child})" def assignments2conjunction(assignments, right_child=None): assert assignments assignments_remainder = copy(assignments) variable, value = assignments_remainder.popitem() # pops items in reverse order, which is important for respecting order of operations primitive_event = PrimitiveEvent(variable, value) formula = Conjunction(primitive_event, right_child) if right_child else primitive_event return assignments2conjunction(assignments_remainder, formula) if assignments_remainder else formula class CausalNetwork: def __init__(self): self.graph = DiGraph() self.structural_equations = dict() self.endogenous_bindings = dict() def add_dependency(self, endogenous_variable, parents, structural_equation): for parent_variable in parents: self.graph.add_edge(parent_variable, endogenous_variable) self.structural_equations[endogenous_variable] = structural_equation def evaluate(self, context): values = copy(context) for variable in topological_sort(self.graph): if variable not in values: values[variable] = self.structural_equation(variable, values) return {key: value for key, value in values.items() if key not in context} def signature(self): in_degrees = self.graph.in_degree() return {v for v, d in in_degrees if d == 0}, {v for v, d in in_degrees if d != 0} def structural_equation(self, variable, parent_values): return self.endogenous_bindings[variable] if variable in self.endogenous_bindings else
self.structural_equations[variable](parent_values) def intervene(self, intervention): new_causal_network = CausalNetwork() _, endogenous_variables = self.signature() for variable in endogenous_variables: new_causal_network.add_dependency(variable, self.graph.predecessors(variable), self.structural_equations[variable]) for variable, value in intervention.items(): new_causal_network.endogenous_bindings[variable] = value return new_causal_network def write(self, path, prog="dot"): # prog=neato|dot|twopi|circo|fdp|nop to_agraph(self.graph).draw(path, prog=prog) class CausalSetting: def __init__(self, causal_network, context, exogenous_domains, endogenous_domains): self.causal_network = causal_network self.context = context # dict mapping exogenous variables to values self.exogenous_domains = exogenous_domains self.endogenous_domains = endogenous_domains exogenous_variables, endogenous_variables = self.causal_network.signature() assert exogenous_variables == set(self.context.keys()) assert exogenous_variables == set(self.exogenous_domains.keys()) assert endogenous_variables == set(self.endogenous_domains.keys()) assert all(self.context[exogenous_variable] in domain for exogenous_variable, domain in self.exogenous_domains.items()) self.derived_values = self.causal_network.evaluate(self.context) self.values = {**self.context, **self.derived_values} assert all(self.values[endogenous_variable] in domain for endogenous_variable, domain in self.endogenous_domains.items()) class CausalFormula: def __init__(self, intervention, event): self.intervention = intervention # dict mapping endogenous variables to values self.event = event # Boolean combination of primitive events def entailed_by(self, causal_setting): new_causal_network = causal_setting.causal_network.intervene(self.intervention) new_causal_setting = CausalSetting(new_causal_network, causal_setting.context, causal_setting.exogenous_domains, causal_setting.endogenous_domains) return self.event.entailed_by(new_causal_setting) def __str__(self): return f"[{format_dict(self.intervention, sep_item="; ", sep_key_value="<-", brackets=False)}]({self.event})" def satisfies_ac1(candidate, event, causal_setting): if not candidate: return False if not assignments2conjunction(candidate).entailed_by(causal_setting): return False if not event.entailed_by(causal_setting): return False return True def find_witnesses_ac2(candidate, event, causal_setting): x = {candidate_variable: causal_setting.values[candidate_variable] for candidate_variable in candidate} all_w = {other_variable: causal_setting.values[other_variable] for other_variable in causal_setting.endogenous_domains.keys() - candidate.keys()} x_variables_tuple = sorted(x.keys()) x_domains_tuple = [causal_setting.endogenous_domains[variable] - {x[variable]} for variable in x_variables_tuple] # only consider "remaining" values in domain for x_prime_values_tuple in itertools.product(*x_domains_tuple): x_prime = {variable: value for variable, value in zip(x_variables_tuple, x_prime_values_tuple)} for w in powerdict(all_w): witness = {**x_prime, **w} causal_formula = CausalFormula(witness, Negation(event)) if causal_formula.entailed_by(causal_setting): yield witness def satisfies_ac2(candidate, event, causal_setting): if not candidate: return False for _ in find_witnesses_ac2(candidate, event, causal_setting): return True # there is at least one witness return False def satisfies_ac3(candidate, event, causal_setting): for subset_candidate in powerdict(candidate): if subset_candidate != candidate: if
satisfies_ac1(subset_candidate, event, causal_setting) and satisfies_ac2(subset_candidate, event, causal_setting): return False return True def is_actual_cause(candidate, event, causal_setting): if not satisfies_ac1(candidate, event, causal_setting): logger.debug("AC1 failed") return False logger.debug("AC1 passed") if not satisfies_ac2(candidate, event, causal_setting): logger.debug("AC2 failed") return False logger.debug("AC2 passed") if not satisfies_ac3(candidate, event, causal_setting): logger.debug("AC3 failed") return False logger.debug("AC3 passed") return True def satisfies_sc1(candidate, event, causal_setting): if not assignments2conjunction(candidate).entailed_by(causal_setting): return False if not event.entailed_by(causal_setting): return False return True def satisfies_sc2(candidate, event, causal_setting): for actual_cause in find_actual_causes(event, causal_setting): for variable, value in candidate.items(): if variable in actual_cause and actual_cause[variable] == value: return True return False def satisfies_sc3(candidate, event, causal_setting): for context_prime in find_exact_assignments(causal_setting.exogenous_domains, causal_setting.exogenous_domains.keys()): if not CausalFormula(candidate, event).entailed_by(CausalSetting(causal_setting.causal_network, context_prime, causal_setting.exogenous_domains, causal_setting.endogenous_domains)): return False return True def satisfies_sc4(candidate, event, causal_setting): for subset_candidate in powerdict(candidate): if subset_candidate and subset_candidate != candidate: if satisfies_sc1(subset_candidate, event, causal_setting) and satisfies_sc2(subset_candidate, event, causal_setting) and satisfies_sc3(subset_candidate, event, causal_setting): return False return True def is_sufficient_cause(candidate, event, causal_setting): # as in Halpern (2016) rather than Halpern & Pearl (2005) if not satisfies_sc1(candidate, event, causal_setting): return False if not satisfies_sc2(candidate, event, causal_setting): return False if not satisfies_sc3(candidate, event, causal_setting): return False return True def find_exact_assignments(domains, variables): assert variables variables_tuple = sorted(variables) domains_tuple = [domains[variable] for variable in variables_tuple] for values_tuple in itertools.product(*domains_tuple): yield {variable: value for variable, value in zip(variables_tuple, values_tuple)} def find_all_assignments(domains): for variables in powerset(domains.keys()): if variables: yield from find_exact_assignments(domains, variables) def find_actual_causes(event, causal_setting): for candidate in find_all_assignments(causal_setting.endogenous_domains): if is_actual_cause(candidate, event, causal_setting): yield candidate def find_sufficient_causes(event, causal_setting): for candidate in find_all_assignments(causal_setting.endogenous_domains): if is_sufficient_cause(candidate, event, causal_setting): yield candidate class EpistemicState: def __init__(self, causal_network, contexts, exogenous_domains, endogenous_domains): self.causal_network = causal_network self.contexts = contexts self.exogenous_domains = exogenous_domains self.endogenous_domains = endogenous_domains def causal_settings(self): for context in self.contexts: yield CausalSetting(self.causal_network, context, self.exogenous_domains, self.endogenous_domains) def satisfies_ex1(candidate, event, epistemic_state): for causal_setting in epistemic_state.causal_settings(): if Conjunction(assignments2conjunction(candidate), event).entailed_by(causal_setting):
if not satisfies_sc2(candidate, event, causal_setting): return False if not CausalFormula(candidate, event).entailed_by(causal_setting): return False return True def satisfies_ex2(candidate, event, epistemic_state): for subset_candidate in powerdict(candidate): if subset_candidate and subset_candidate != candidate: if satisfies_ex1(subset_candidate, event, epistemic_state): return False return True def satisfies_ex3(candidate, event, epistemic_state): for causal_setting in epistemic_state.causal_settings(): if Conjunction(assignments2conjunction(candidate), event).entailed_by(causal_setting): return True return False def satisfies_ex4(candidate, event, epistemic_state): for causal_setting in epistemic_state.causal_settings(): if event.entailed_by(causal_setting): if Negation(assignments2conjunction(candidate)).entailed_by(causal_setting): return True return False def is_explanation(candidate, event, epistemic_state): if not satisfies_ex1(candidate, event, epistemic_state): return False if not satisfies_ex2(candidate, event, epistemic_state): return False if not satisfies_ex3(candidate, event, epistemic_state): return False return True def is_nontrivial_explanation(candidate, event, epistemic_state): if not is_explanation(candidate, event, epistemic_state): return False if not satisfies_ex4(candidate, event, epistemic_state): return False return True def is_trivial_explanation(candidate, event, epistemic_state): if not is_explanation(candidate, event, epistemic_state): return False if satisfies_ex4(candidate, event, epistemic_state): return False return True def find_explanations(event, epistemic_state): for candidate in find_all_assignments(epistemic_state.endogenous_domains): if is_explanation(candidate, event, epistemic_state): yield candidate def find_nontrivial_explanations(event, epistemic_state): for candidate in find_all_assignments(epistemic_state.endogenous_domains): if is_nontrivial_explanation(candidate, event, epistemic_state): yield candidate def find_trivial_explanations(event, epistemic_state): for candidate in find_all_assignments(epistemic_state.endogenous_domains): if is_trivial_explanation(candidate, event, epistemic_state): yield candidate
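# ---------------------------------------------------------------------------
# Worked example (illustrative only, not part of the original module): the
# disjunctive forest-fire model from Halpern & Pearl, where either lightning
# (L) or a dropped match (MD) suffices to start the fire (FF). The variable
# names and the __main__ guard are choices made for this sketch.
def _forest_fire_example():
    u_l, u_md = Variable("U_L"), Variable("U_MD")  # exogenous causes
    lightning, match, fire = Variable("L"), Variable("MD"), Variable("FF")

    network = CausalNetwork()
    # Each endogenous cause copies its exogenous counterpart; the fire burns
    # if either cause is present (the disjunctive model).
    network.add_dependency(lightning, [u_l], lambda values: values[u_l])
    network.add_dependency(match, [u_md], lambda values: values[u_md])
    network.add_dependency(fire, [lightning, match], lambda values: values[lightning] or values[match])

    context = {u_l: True, u_md: True}  # both lightning and the match occur
    exogenous_domains = {u_l: {True, False}, u_md: {True, False}}
    endogenous_domains = {v: {True, False} for v in (lightning, match, fire)}
    causal_setting = CausalSetting(network, context, exogenous_domains, endogenous_domains)

    # Under the (modified-HP-style) AC2 implemented above, neither L=True nor
    # MD=True alone is an actual cause here, but their conjunction is; the
    # event FF=True is also, trivially, a cause of itself.
    fire_event = PrimitiveEvent(fire, True)
    for cause in find_actual_causes(fire_event, causal_setting):
        print(cause)


if __name__ == "__main__":
    _forest_fire_example()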
import itertools import logging from abc import ABC, abstractmethod, ABCMeta from copy import copy from networkx import DiGraph, topological_sort from networkx.drawing.nx_agraph import to_agraph from lib.utils import powerset, format_dict, powerdict logger = logging.getLogger("halpern_pearl") logging.basicConfig(level=logging.INFO) class Variable: def __init__(self, symbol): self.symbol = symbol def __hash__(self): return hash((type(self), self.symbol)) def __eq__(self, other): return self.symbol == other.symbol def __str__(self): return f"{self.symbol}" def __repr__(self): return self.__str__() def __lt__(self, other): return self.symbol < other.symbol class Event(ABC): @abstractmethod def entailed_by(self, causal_setting): raise NotImplementedError @abstractmethod def variables(self): raise NotImplementedError def __repr__(self): return self.__str__() class PrimitiveEvent(Event): def __init__(self, variable, value): self.variable = variable self.value = value def entailed_by(self, causal_setting): return causal_setting.values[self.variable] == self.value def variables(self): return {self.variable} def __str__(self): return f"{self.variable}={self.value}" class Negation(Event): def __init__(self, child): self.child = child def entailed_by(self, causal_setting): return not self.child.entailed_by(causal_setting) def variables(self): return self.child.variables() def __str__(self): return f"!({self.child})" class BinaryFormula(Event, metaclass=ABCMeta): def __init__(self, left_child, right_child): self.left_child = left_child self.right_child = right_child def variables(self): return self.left_child.variables() | self.right_child.variables() class Conjunction(BinaryFormula): def entailed_by(self, causal_setting): return self.left_child.entailed_by(causal_setting) and self.right_child.entailed_by(causal_setting) def __str__(self): return f"({self.left_child} & {self.right_child})" class Disjunction(BinaryFormula): def entailed_by(self, causal_setting): return self.left_child.entailed_by(causal_setting) or self.right_child.entailed_by(causal_setting) def __str__(self): return f"({self.left_child} | {self.right_child})" def assignments2conjunction(assignments, right_child=None): assert assignments assignments_remainder = copy(assignments) variable, value = assignments_remainder.popitem() # pops items in reverse order, which is important for respecting order of operations primitive_event = PrimitiveEvent(variable, value) formula = Conjunction(primitive_event, right_child) if right_child else primitive_event return assignments2conjunction(assignments_remainder, formula) if assignments_remainder else formula class CausalNetwork: def __init__(self): self.graph = DiGraph() self.structural_equations = dict() self.endogenous_bindings = dict() def add_dependency(self, endogenous_variable, parents, structural_equation): for parent_variable in parents: self.graph.add_edge(parent_variable, endogenous_variable) self.structural_equations[endogenous_variable] = structural_equation def evaluate(self, context): values = copy(context) for variable in topological_sort(self.graph): if variable not in values: values[variable] = self.structural_equation(variable, values) return {key: value for key, value in values.items() if key not in context} def signature(self): in_degrees = self.graph.in_degree() return {v for v, d in in_degrees if d == 0}, {v for v, d in in_degrees if d != 0} def structural_equation(self, variable, parent_values): return self.endogenous_bindings[variable] if variable in self.endogenous_bindings else
self.structural_equations[variable](parent_values) def intervene(self, intervention): new_causal_network = CausalNetwork() _, endogenous_variables = self.signature() for variable in endogenous_variables: new_causal_network.add_dependency(variable, self.graph.predecessors(variable), self.structural_equations[variable]) for variable, value in intervention.items(): new_causal_network.endogenous_bindings[variable] = value return new_causal_network def write(self, path, prog="dot"): # prog=neato|dot|twopi|circo|fdp|nop to_agraph(self.graph).draw(path, prog=prog) class CausalSetting: def __init__(self, causal_network, context, exogenous_domains, endogenous_domains): self.causal_network = causal_network self.context = context # dict mapping exogenous variables to values self.exogenous_domains = exogenous_domains self.endogenous_domains = endogenous_domains exogenous_variables, endogenous_variables = self.causal_network.signature() assert exogenous_variables == set(self.context.keys()) assert exogenous_variables == set(self.exogenous_domains.keys()) assert endogenous_variables == set(self.endogenous_domains.keys()) assert all(self.context[exogenous_variable] in domain for exogenous_variable, domain in self.exogenous_domains.items()) self.derived_values = self.causal_network.evaluate(self.context) self.values = {**self.context, **self.derived_values} assert all(self.values[endogenous_variable] in domain for endogenous_variable, domain in self.endogenous_domains.items()) class CausalFormula: def __init__(self, intervention, event): self.intervention = intervention # dict mapping endogenous variables to values self.event = event # Boolean combination of primitive events def entailed_by(self, causal_setting): new_causal_network = causal_setting.causal_network.intervene(self.intervention) new_causal_setting = CausalSetting(new_causal_network, causal_setting.context, causal_setting.exogenous_domains, causal_setting.endogenous_domains) return self.event.entailed_by(new_causal_setting) def __str__(self): return f"[{format_dict(self.intervention, sep_item='; ', sep_key_value='<-', brackets=False)}]({self.event})" def satisfies_ac1(candidate, event, causal_setting): if not candidate: return False if not assignments2conjunction(candidate).entailed_by(causal_setting): return False if not event.entailed_by(causal_setting): return False return True def find_witnesses_ac2(candidate, event, causal_setting): x = {candidate_variable: causal_setting.values[candidate_variable] for candidate_variable in candidate} all_w = {other_variable: causal_setting.values[other_variable] for other_variable in causal_setting.endogenous_domains.keys() - candidate.keys()} x_variables_tuple = sorted(x.keys()) x_domains_tuple = [causal_setting.endogenous_domains[variable] - {x[variable]} for variable in x_variables_tuple] # only consider "remaining" values in domain for x_prime_values_tuple in itertools.product(*x_domains_tuple): x_prime = {variable: value for variable, value in zip(x_variables_tuple, x_prime_values_tuple)} for w in powerdict(all_w): witness = {**x_prime, **w} causal_formula = CausalFormula(witness, Negation(event)) if causal_formula.entailed_by(causal_setting): yield witness def satisfies_ac2(candidate, event, causal_setting): if not candidate: return False for _ in find_witnesses_ac2(candidate, event, causal_setting): return True # there is at least one witness return False def satisfies_ac3(candidate, event, causal_setting): for subset_candidate in powerdict(candidate): if subset_candidate != candidate: if
satisfies_ac1(subset_candidate, event, causal_setting) and satisfies_ac2(subset_candidate, event, causal_setting): return False return True def is_actual_cause(candidate, event, causal_setting): if not satisfies_ac1(candidate, event, causal_setting): logger.debug("AC1 failed") return False logger.debug("AC1 passed") if not satisfies_ac2(candidate, event, causal_setting): logger.debug("AC2 failed") return False logger.debug("AC2 passed") if not satisfies_ac3(candidate, event, causal_setting): logger.debug("AC3 failed") return False logger.debug("AC3 passed") return True def satisfies_sc1(candidate, event, causal_setting): if not assignments2conjunction(candidate).entailed_by(causal_setting): return False if not event.entailed_by(causal_setting): return False return True def satisfies_sc2(candidate, event, causal_setting): for actual_cause in find_actual_causes(event, causal_setting): for variable, value in candidate.items(): if variable in actual_cause and actual_cause[variable] == value: return True return False def satisfies_sc3(candidate, event, causal_setting): for context_prime in find_exact_assignments(causal_setting.exogenous_domains, causal_setting.exogenous_domains.keys()): if not CausalFormula(candidate, event).entailed_by(CausalSetting(causal_setting.causal_network, context_prime, causal_setting.exogenous_domains, causal_setting.endogenous_domains)): return False return True def satisfies_sc4(candidate, event, causal_setting): for subset_candidate in powerdict(candidate): if subset_candidate and subset_candidate != candidate: if satisfies_sc1(subset_candidate, event, causal_setting) and satisfies_sc2(subset_candidate, event, causal_setting) and satisfies_sc3(subset_candidate, event, causal_setting): return False return True def is_sufficient_cause(candidate, event, causal_setting): # as in Halpern (2016) rather than Halpern & Pearl (2005) if not satisfies_sc1(candidate, event, causal_setting): return False if not satisfies_sc2(candidate, event, causal_setting): return False if not satisfies_sc3(candidate, event, causal_setting): return False return True def find_exact_assignments(domains, variables): assert variables variables_tuple = sorted(variables) domains_tuple = [domains[variable] for variable in variables_tuple] for values_tuple in itertools.product(*domains_tuple): yield {variable: value for variable, value in zip(variables_tuple, values_tuple)} def find_all_assignments(domains): for variables in powerset(domains.keys()): if variables: yield from find_exact_assignments(domains, variables) def find_actual_causes(event, causal_setting): for candidate in find_all_assignments(causal_setting.endogenous_domains): if is_actual_cause(candidate, event, causal_setting): yield candidate def find_sufficient_causes(event, causal_setting): for candidate in find_all_assignments(causal_setting.endogenous_domains): if is_sufficient_cause(candidate, event, causal_setting): yield candidate class EpistemicState: def __init__(self, causal_network, contexts, exogenous_domains, endogenous_domains): self.causal_network = causal_network self.contexts = contexts self.exogenous_domains = exogenous_domains self.endogenous_domains = endogenous_domains def causal_settings(self): for context in self.contexts: yield CausalSetting(self.causal_network, context, self.exogenous_domains, self.endogenous_domains) def satisfies_ex1(candidate, event, epistemic_state): for causal_setting in epistemic_state.causal_settings(): if Conjunction(assignments2conjunction(candidate), event).entailed_by(causal_setting):
if not satisfies_sc2(candidate, event, causal_setting): return False if not CausalFormula(candidate, event).entailed_by(causal_setting): return False return True def satisfies_ex2(candidate, event, epistemic_state): for subset_candidate in powerdict(candidate): if subset_candidate and subset_candidate != candidate: if satisfies_ex1(subset_candidate, event, epistemic_state): return False return True def satisfies_ex3(candidate, event, epistemic_state): for causal_setting in epistemic_state.causal_settings(): if Conjunction(assignments2conjunction(candidate), event).entailed_by(causal_setting): return True return False def satisfies_ex4(candidate, event, epistemic_state): for causal_setting in epistemic_state.causal_settings(): if event.entailed_by(causal_setting): if Negation(assignments2conjunction(candidate)).entailed_by(causal_setting): return True return False def is_explanation(candidate, event, epistemic_state): if not satisfies_ex1(candidate, event, epistemic_state): return False if not satisfies_ex2(candidate, event, epistemic_state): return False if not satisfies_ex3(candidate, event, epistemic_state): return False return True def is_nontrivial_explanation(candidate, event, epistemic_state): if not is_explanation(candidate, event, epistemic_state): return False if not satisfies_ex4(candidate, event, epistemic_state): return False return True def is_trivial_explanation(candidate, event, epistemic_state): if not is_explanation(candidate, event, epistemic_state): return False if satisfies_ex4(candidate, event, epistemic_state): return False return True def find_explanations(event, epistemic_state): for candidate in find_all_assignments(epistemic_state.endogenous_domains): if is_explanation(candidate, event, epistemic_state): yield candidate def find_nontrivial_explanations(event, epistemic_state): for candidate in find_all_assignments(epistemic_state.endogenous_domains): if is_nontrivial_explanation(candidate, event, epistemic_state): yield candidate def find_trivial_explanations(event, epistemic_state): for candidate in find_all_assignments(epistemic_state.endogenous_domains): if is_trivial_explanation(candidate, event, epistemic_state): yield candidate
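# --- Explanation sketch (illustration only; same assumed module as the
# earlier sketch, reusing its U -> A -> B network and variables) -------------
# EpistemicState generalises a single context to a set of contexts the agent
# considers possible; find_explanations then yields candidates satisfying
# EX1-EX3 across those contexts.
from halpern_pearl import EpistemicState, find_explanations

epistemic = EpistemicState(network, contexts=[{U: 0}, {U: 1}],
                           exogenous_domains={U: {0, 1}},
                           endogenous_domains={A: {0, 1}, B: {0, 1}})
for explanation in find_explanations(PrimitiveEvent(B, 1), epistemic):
    print(explanation)  # e.g. {A: 1}, which holds exactly when B=1 does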
# Copyright (C) 2019 The Raphielscape Company LLC. # # Licensed under the Raphielscape Public License, Version 1.c (the "License"); # you may not use this file except in compliance with the License. # # Asena UserBot - Yusuf Usta """ UserBot module used to obtain information related to the Internet. """ from datetime import datetime from speedtest import Speedtest from telethon import functions from userbot import CMD_HELP from userbot.events import register from userbot.cmdhelp import CmdHelp # ██████ LANGUAGE CONSTANTS ██████ # from userbot.language import get_value LANG = get_value("www") # ████████████████████████████████ # @register(outgoing=True, pattern="^.speed$") async def speedtst(spd): """ The .speed command uses SpeedTest to measure the server's connection speed. """ await spd.edit(LANG['SPEED']) test = Speedtest() test.get_best_server() test.download() test.upload() result = test.results.dict() await spd.edit("`" f"{LANG["STARTED_TIME"]}" f"{result["timestamp"]} \n\n" f"{LANG["DOWNLOAD_SPEED"]}" f"{speed_convert(result["download"])} \n" f"{LANG["UPLOAD_SPEED"]}" f"{speed_convert(result["upload"])} \n" "Ping: " f"{result["ping"]} \n" f"{LANG["ISP"]}" f"{result["client"]["isp"]}" "`") def speed_convert(size): """ Hello Asena, can't you read bytes? """ power = 2**10 zero = 0 units = {0: '', 1: 'Kb/s', 2: 'Mb/s', 3: 'Gb/s', 4: 'Tb/s'} while size > power: size /= power zero += 1 return f"{round(size, 2)} {units[zero]}" @register(outgoing=True, pattern="^.dc$") async def neardc(event): """ The .dc command gives information about the nearest datacenter. """ result = await event.client(functions.help.GetNearestDcRequest()) await event.edit(f"City : `{result.country}`\n" f"Nearest datacenter : `{result.nearest_dc}`\n" f"Current datacenter : `{result.this_dc}`") @register(outgoing=True, pattern="^.ping$") async def pingme(pong): """ The .ping command can show the userbot's ping in any chat. """ start = datetime.now() await pong.edit("`Pong!`") end = datetime.now() duration = (end - start).microseconds / 1000 await pong.edit("`Pong!\n%sms`" % (duration)) CmdHelp('www').add_command( 'speed', None, 'Runs a speedtest and shows the result.' ).add_command( 'dc', None, 'Shows the datacenter nearest to your server.' ).add_command( 'ping', None, 'Shows the bot\'s ping.' ).add()
# Copyright (C) 2019 The Raphielscape Company LLC. # # Licensed under the Raphielscape Public License, Version 1.c (the "License"); # you may not use this file except in compliance with the License. # # Asena UserBot - Yusuf Usta """ UserBot module used to obtain information related to the Internet. """ from datetime import datetime from speedtest import Speedtest from telethon import functions from userbot import CMD_HELP from userbot.events import register from userbot.cmdhelp import CmdHelp # ██████ LANGUAGE CONSTANTS ██████ # from userbot.language import get_value LANG = get_value("www") # ████████████████████████████████ # @register(outgoing=True, pattern="^.speed$") async def speedtst(spd): """ The .speed command uses SpeedTest to measure the server's connection speed. """ await spd.edit(LANG['SPEED']) test = Speedtest() test.get_best_server() test.download() test.upload() result = test.results.dict() await spd.edit("`" f"{LANG['STARTED_TIME']}" f"{result['timestamp']} \n\n" f"{LANG['DOWNLOAD_SPEED']}" f"{speed_convert(result['download'])} \n" f"{LANG['UPLOAD_SPEED']}" f"{speed_convert(result['upload'])} \n" "Ping: " f"{result['ping']} \n" f"{LANG['ISP']}" f"{result['client']['isp']}" "`") def speed_convert(size): """ Hello Asena, can't you read bytes? """ power = 2**10 zero = 0 units = {0: '', 1: 'Kb/s', 2: 'Mb/s', 3: 'Gb/s', 4: 'Tb/s'} while size > power: size /= power zero += 1 return f"{round(size, 2)} {units[zero]}" @register(outgoing=True, pattern="^.dc$") async def neardc(event): """ The .dc command gives information about the nearest datacenter. """ result = await event.client(functions.help.GetNearestDcRequest()) await event.edit(f"City : `{result.country}`\n" f"Nearest datacenter : `{result.nearest_dc}`\n" f"Current datacenter : `{result.this_dc}`") @register(outgoing=True, pattern="^.ping$") async def pingme(pong): """ The .ping command can show the userbot's ping in any chat. """ start = datetime.now() await pong.edit("`Pong!`") end = datetime.now() duration = (end - start).microseconds / 1000 await pong.edit("`Pong!\n%sms`" % (duration)) CmdHelp('www').add_command( 'speed', None, 'Runs a speedtest and shows the result.' ).add_command( 'dc', None, 'Shows the datacenter nearest to your server.' ).add_command( 'ping', None, 'Shows the bot\'s ping.' ).add()
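# --- Quick sanity check of speed_convert (illustrative values only) ---------
# speedtest reports bits per second; the helper divides by 1024 until the
# value fits a unit bucket, then rounds to two decimals.
assert speed_convert(10_000_000) == "9.54 Mb/s"      # 10e6 / 1024 / 1024
assert speed_convert(2_500_000_000) == "2.33 Gb/s"   # three divisions by 1024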
import json from hashlib import sha256 from collections import Counter from inputimeout import inputimeout, TimeoutOccurred import tabulate, copy, time, datetime, requests, sys, os, random from captcha import captcha_builder BOOKING_URL = "https://cdn-api.co-vin.in/api/v2/appointment/schedule" BENEFICIARIES_URL = "https://cdn-api.co-vin.in/api/v2/appointment/beneficiaries" CALENDAR_URL_DISTRICT = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByDistrict?district_id={0}&date={1}" CALENDAR_URL_PINCODE = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByPin?pincode={0}&date={1}" CAPTCHA_URL = "https://cdn-api.co-vin.in/api/v2/auth/getRecaptcha" OTP_PUBLIC_URL = 'https://cdn-api.co-vin.in/api/v2/auth/public/generateOTP' OTP_PRO_URL = 'https://cdn-api.co-vin.in/api/v2/auth/generateMobileOTP' WARNING_BEEP_DURATION = (1000, 2000) try: import winsound except ImportError: import os if sys.platform == "darwin": def beep(freq, duration): # brew install SoX --> install SOund eXchange universal sound sample translator on mac os.system( f"play -n synth {duration/1000} sin {freq} >/dev/null 2>&1") else: def beep(freq, duration): # apt-get install beep --> install beep package on linux distros before running os.system('beep -f %s -l %s' % (freq, duration)) else: def beep(freq, duration): winsound.Beep(freq, duration) def viable_options(resp, minimum_slots, min_age_booking, fee_type): options = [] if len(resp['centers']) >= 0: for center in resp['centers']: for session in center['sessions']: if (session['available_capacity'] >= minimum_slots) \ and (session['min_age_limit'] <= min_age_booking)\ and (center['fee_type'] in fee_type): out = { 'name': center['name'], 'district': center['district_name'], 'pincode': center['pincode'], 'center_id': center['center_id'], 'available': session['available_capacity'], 'date': session['date'], 'slots': session['slots'], 'session_id': session['session_id'] } options.append(out) else: pass else: pass return options def display_table(dict_list): """ This function 1. Takes a list of dictionary 2. Add an Index column, and 3. Displays the data in tabular format """ header = ['idx'] + list(dict_list[0].keys()) rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)] print(tabulate.tabulate(rows, header, tablefmt='grid')) def display_info_dict(details): for key, value in details.items(): if isinstance(value, list): if all(isinstance(item, dict) for item in value): print(f"\t{key}:") display_table(value) else: print(f"\t{key}\t: {value}") else: print(f"\t{key}\t: {value}") def confirm_and_proceed(collected_details): print("\n================================= Confirm Info =================================\n") display_info_dict(collected_details) confirm = input("\nProceed with above info (y/n Default y) : ") confirm = confirm if confirm else 'y' if confirm != 'y': print("Details not confirmed. 
Exiting process.") os.system("pause") sys.exit() def save_user_info(filename, details): print("\n================================= Save Info =================================\n") save_info = input("Would you like to save this as a JSON file for easy use next time?: (y/n Default y): ") save_info = save_info if save_info else 'y' if save_info == 'y': with open(filename, 'w') as f: json.dump(details, f) print(f"Info saved to {filename} in {os.getcwd()}") def get_saved_user_info(filename): with open(filename, 'r') as f: data = json.load(f) return data def collect_user_details(request_header): # Get Beneficiaries print("Fetching registered beneficiaries.. ") beneficiary_dtls = get_beneficiaries(request_header) if len(beneficiary_dtls) == 0: print("There should be at least one beneficiary. Exiting.") os.system("pause") sys.exit(1) # Make sure all beneficiaries have the same type of vaccine vaccine_types = [beneficiary['vaccine'] for beneficiary in beneficiary_dtls] vaccines = Counter(vaccine_types) if len(vaccines.keys()) != 1: print(f"All beneficiaries in one attempt should have the same vaccine type. Found {len(vaccines.keys())}") os.system("pause") sys.exit(1) vaccine_type = vaccine_types[0] # if all([beneficiary['status'] == 'Partially Vaccinated' for beneficiary in beneficiary_dtls]) else None if not vaccine_type: print("\n================================= Vaccine Info =================================\n") vaccine_type = get_vaccine_preference() print("\n================================= Location Info =================================\n") # get search method to use search_option = input( """Search by Pincode? Or by State/District? \nEnter 1 for Pincode or 2 for State/District. (Default 2) : """) if not search_option or int(search_option) not in [1, 2]: search_option = 2 else: search_option = int(search_option) if search_option == 2: # Collect vaccination center preferance location_dtls = get_districts(request_header) else: # Collect vaccination center preferance location_dtls = get_pincodes() print("\n================================= Additional Info =================================\n") # Set filter condition minimum_slots = input(f'Filter out centers with availability less than ? Minimum {len(beneficiary_dtls)} : ') if minimum_slots: minimum_slots = int(minimum_slots) if int(minimum_slots) >= len(beneficiary_dtls) else len(beneficiary_dtls) else: minimum_slots = len(beneficiary_dtls) # Get refresh frequency refresh_freq = input('How often do you want to refresh the calendar (in seconds)? Default 5. Minimum 3. : ') refresh_freq = int(refresh_freq) if refresh_freq and int(refresh_freq) >= 3 else 5 # Get search start date start_date = input( '\nSearch for next seven day starting from when?\nUse 1 for today, 2 for tomorrow, or provide a date in the format DD-MM-YYYY. Default 2: ') if not start_date: start_date = 2 elif start_date in ['1', '2']: start_date = int(start_date) else: try: datetime.datetime.strptime(start_date, '%d-%m-%Y') except ValueError: print('Invalid Date! Proceeding with tomorrow.') start_date = 2 # Get preference of Free/Paid option fee_type = get_fee_type_preference() print("\n=========== CAUTION! =========== CAUTION! CAUTION! =============== CAUTION! =======\n") print("===== BE CAREFUL WITH THIS OPTION! AUTO-BOOKING WILL BOOK THE FIRST AVAILABLE CENTRE, DATE, AND A RANDOM SLOT! =====") auto_book = input("Do you want to enable auto-booking? 
(yes-please or no) Default no: ") auto_book = 'no' if not auto_book else auto_book collected_details = { 'beneficiary_dtls': beneficiary_dtls, 'location_dtls': location_dtls, 'search_option': search_option, 'minimum_slots': minimum_slots, 'refresh_freq': refresh_freq, 'auto_book': auto_book, 'start_date': start_date, 'vaccine_type': vaccine_type, 'fee_type': fee_type } return collected_details def check_calendar_by_district(request_header, vaccine_type, location_dtls, start_date, minimum_slots, min_age_booking, fee_type): """ This function 1. Takes details required to check vaccination calendar 2. Filters result by minimum number of slots available 3. Returns False if token is invalid 4. Returns list of vaccination centers & slots if available """ try: print('===================================================================================') today = datetime.datetime.today() base_url = CALENDAR_URL_DISTRICT if vaccine_type: base_url += f"&vaccine={vaccine_type}" options = [] for location in location_dtls: resp = requests.get(base_url.format(location['district_id'], start_date), headers=request_header) if resp.status_code == 401: print('TOKEN INVALID') return False elif resp.status_code == 200: resp = resp.json() if 'centers' in resp: print(f"Centers available in {location["district_name"]} from {start_date} as of {today.strftime("%Y-%m-%d %H:%M:%S")}: {len(resp["centers"])}") options += viable_options(resp, minimum_slots, min_age_booking, fee_type) else: pass for location in location_dtls: if location['district_name'] in [option['district'] for option in options]: for _ in range(2): beep(location['alert_freq'], 150) return options except Exception as e: print(str(e)) beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1]) def check_calendar_by_pincode(request_header, vaccine_type, location_dtls, start_date, minimum_slots, min_age_booking, fee_type): """ This function 1. Takes details required to check vaccination calendar 2. Filters result by minimum number of slots available 3. Returns False if token is invalid 4. Returns list of vaccination centers & slots if available """ try: print('===================================================================================') today = datetime.datetime.today() base_url = CALENDAR_URL_PINCODE if vaccine_type: base_url += f"&vaccine={vaccine_type}" options = [] for location in location_dtls: resp = requests.get(base_url.format(location['pincode'], start_date), headers=request_header) if resp.status_code == 401: print('TOKEN INVALID') return False elif resp.status_code == 200: resp = resp.json() if 'centers' in resp: print(f"Centers available in {location["pincode"]} from {start_date} as of {today.strftime("%Y-%m-%d %H:%M:%S")}: {len(resp["centers"])}") options += viable_options(resp, minimum_slots, min_age_booking, fee_type) else: pass for location in location_dtls: if int(location['pincode']) in [option['pincode'] for option in options]: for _ in range(2): beep(location['alert_freq'], 150) return options except Exception as e: print(str(e)) beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1]) def generate_captcha(request_header): print('================================= GETTING CAPTCHA ==================================================') resp = requests.post(CAPTCHA_URL, headers=request_header) print(f'Captcha Response Code: {resp.status_code}') if resp.status_code == 200: return captcha_builder(resp.json()) def book_appointment(request_header, details): """ This function 1. Takes details in json format 2. 
Attempts to book an appointment using the details 3. Returns True or False depending on Token Validity """ try: valid_captcha = True while valid_captcha: captcha = generate_captcha(request_header) details['captcha'] = captcha print('================================= ATTEMPTING BOOKING ==================================================') resp = requests.post(BOOKING_URL, headers=request_header, json=details) print(f'Booking Response Code: {resp.status_code}') print(f'Booking Response : {resp.text}') if resp.status_code == 401: print('TOKEN INVALID') return False elif resp.status_code == 200: beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1]) print('############## BOOKED! ############################ BOOKED! ##############') print(" Hey, Hey, Hey! It's your lucky day! ") print('\nPress any key thrice to exit program.') os.system("pause") os.system("pause") os.system("pause") sys.exit() elif resp.status_code == 400: print(f'Response: {resp.status_code} : {resp.text}') pass else: print(f'Response: {resp.status_code} : {resp.text}') return True except Exception as e: print(str(e)) beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1]) def check_and_book(request_header, beneficiary_dtls, location_dtls, search_option, **kwargs): """ This function 1. Checks the vaccination calendar for available slots, 2. Lists all viable options, 3. Takes user's choice of vaccination center and slot, 4. Calls function to book appointment, and 5. Returns True or False depending on Token Validity """ try: min_age_booking = get_min_age(beneficiary_dtls) minimum_slots = kwargs['min_slots'] refresh_freq = kwargs['ref_freq'] auto_book = kwargs['auto_book'] start_date = kwargs['start_date'] vaccine_type = kwargs['vaccine_type'] fee_type = kwargs['fee_type'] if isinstance(start_date, int) and start_date == 2: start_date = (datetime.datetime.today() + datetime.timedelta(days=1)).strftime("%d-%m-%Y") elif isinstance(start_date, int) and start_date == 1: start_date = datetime.datetime.today().strftime("%d-%m-%Y") else: pass if search_option == 2: options = check_calendar_by_district(request_header, vaccine_type, location_dtls, start_date, minimum_slots, min_age_booking, fee_type) else: options = check_calendar_by_pincode(request_header, vaccine_type, location_dtls, start_date, minimum_slots, min_age_booking, fee_type) if isinstance(options, bool): return False options = sorted(options, key=lambda k: (k['district'].lower(), k['pincode'], k['name'].lower(), datetime.datetime.strptime(k['date'], "%d-%m-%Y")) ) tmp_options = copy.deepcopy(options) if len(tmp_options) > 0: cleaned_options_for_display = [] for item in tmp_options: item.pop('session_id', None) item.pop('center_id', None) cleaned_options_for_display.append(item) display_table(cleaned_options_for_display) if auto_book == 'yes-please': print("AUTO-BOOKING IS ENABLED. PROCEEDING WITH FIRST CENTRE, DATE, and RANDOM SLOT.") option = options[0] random_slot = random.randint(1, len(option['slots'])) choice = f'1.{random_slot}' else: choice = inputimeout( prompt='----------> Wait 20 seconds for updated options OR \n----------> Enter a choice e.g: 1.4 for (1st center 4th slot): ', timeout=20) else: for i in range(refresh_freq, 0, -1): msg = f"No viable options. Next update in {i} seconds.." print(msg, end="\r", flush=True) sys.stdout.flush() time.sleep(1) choice = '.' 
except TimeoutOccurred: time.sleep(1) return True else: if choice == '.': return True else: try: choice = choice.split('.') choice = [int(item) for item in choice] print(f'============> Got Choice: Center #{choice[0]}, Slot #{choice[1]}') new_req = { 'beneficiaries': [beneficiary['bref_id'] for beneficiary in beneficiary_dtls], 'dose': 2 if [beneficiary['status'] for beneficiary in beneficiary_dtls][0] == 'Partially Vaccinated' else 1, 'center_id' : options[choice[0] - 1]['center_id'], 'session_id': options[choice[0] - 1]['session_id'], 'slot' : options[choice[0] - 1]['slots'][choice[1] - 1] } print(f'Booking with info: {new_req}') return book_appointment(request_header, new_req) except IndexError: print("============> Invalid Option!") os.system("pause") pass def get_vaccine_preference(): print("It seems you're trying to find a slot for your first dose. Do you have a vaccine preference?") preference = input("Enter 0 for No Preference, 1 for COVISHIELD, or 2 for COVAXIN. Default 0 : ") preference = int(preference) if preference and int(preference) in [0, 1, 2] else 0 if preference == 1: return 'COVISHIELD' elif preference == 2: return 'COVAXIN' else: return None def get_fee_type_preference(): print("\nDo you have a fee type preference?") preference = input("Enter 0 for No Preference, 1 for Free Only, or 2 for Paid Only. Default 0 : ") preference = int(preference) if preference and int(preference) in [0, 1, 2] else 0 if preference == 1: return ['Free'] elif preference == 2: return ['Paid'] else: return ['Free', 'Paid'] def get_pincodes(): locations = [] pincodes = input("Enter comma separated pincodes to monitor: ") for idx, pincode in enumerate(pincodes.split(',')): pincode = { 'pincode': pincode, 'alert_freq': 440 + ((2 * idx) * 110) } locations.append(pincode) return locations def get_districts(request_header): """ This function 1. Lists all states, prompts to select one, 2. Lists all districts in that state, prompts to select required ones, and 3. Returns the list of districts as list(dict) """ states = requests.get('https://cdn-api.co-vin.in/api/v2/admin/location/states', headers=request_header) if states.status_code == 200: states = states.json()['states'] refined_states = [] for state in states: tmp = {'state': state['state_name']} refined_states.append(tmp) display_table(refined_states) state = int(input('\nEnter State index: ')) state_id = states[state - 1]['state_id'] districts = requests.get(f'https://cdn-api.co-vin.in/api/v2/admin/location/districts/{state_id}', headers=request_header) if districts.status_code == 200: districts = districts.json()['districts'] refined_districts = [] for district in districts: tmp = {'district': district['district_name']} refined_districts.append(tmp) display_table(refined_districts) reqd_districts = input('\nEnter comma separated index numbers of districts to monitor : ') districts_idx = [int(idx) - 1 for idx in reqd_districts.split(',')] reqd_districts = [{ 'district_id': item['district_id'], 'district_name': item['district_name'], 'alert_freq': 440 + ((2 * idx) * 110) } for idx, item in enumerate(districts) if idx in districts_idx] print(f'Selected districts: ') display_table(reqd_districts) return reqd_districts else: print('Unable to fetch districts') print(districts.status_code) print(districts.text) os.system("pause") sys.exit(1) else: print('Unable to fetch states') print(states.status_code) print(states.text) os.system("pause") sys.exit(1) def get_beneficiaries(request_header): """ This function 1. 
Fetches all beneficiaries registered under the mobile number, 2. Prompts user to select the applicable beneficiaries, and 3. Returns the list of beneficiaries as list(dict) """ beneficiaries = requests.get(BENEFICIARIES_URL, headers=request_header) if beneficiaries.status_code == 200: beneficiaries = beneficiaries.json()['beneficiaries'] refined_beneficiaries = [] for beneficiary in beneficiaries: beneficiary['age'] = datetime.datetime.today().year - int(beneficiary['birth_year']) tmp = { 'bref_id': beneficiary['beneficiary_reference_id'], 'name': beneficiary['name'], 'vaccine': beneficiary['vaccine'], 'age': beneficiary['age'], 'status': beneficiary['vaccination_status'] } refined_beneficiaries.append(tmp) display_table(refined_beneficiaries) print(""" ################# IMPORTANT NOTES ################# # 1. While selecting beneficiaries, make sure that selected beneficiaries are all taking the same dose: either first OR second. # Please do not try to club together booking for first dose for one beneficiary and second dose for another beneficiary. # # 2. While selecting beneficiaries, also make sure that beneficiaries selected for second dose are all taking the same vaccine: COVISHIELD OR COVAXIN. # Please do not try to club together booking for beneficiary taking COVISHIELD with beneficiary taking COVAXIN. # # 3. If you're selecting multiple beneficiaries, make sure all are of the same age group (45+ or 18+) as defined by the govt. # Please do not try to club together booking for younger and older beneficiaries. ################################################### """) reqd_beneficiaries = input('Enter comma separated index numbers of beneficiaries to book for : ') beneficiary_idx = [int(idx) - 1 for idx in reqd_beneficiaries.split(',')] reqd_beneficiaries = [{ 'bref_id': item['beneficiary_reference_id'], 'name': item['name'], 'vaccine': item['vaccine'], 'age': item['age'], 'status': item['vaccination_status'] } for idx, item in enumerate(beneficiaries) if idx in beneficiary_idx] print('Selected beneficiaries: ') display_table(reqd_beneficiaries) return reqd_beneficiaries else: print('Unable to fetch beneficiaries') print(beneficiaries.status_code) print(beneficiaries.text) os.system("pause") return [] def get_min_age(beneficiary_dtls): """ This function returns a min age argument, based on age of all beneficiaries :param beneficiary_dtls: :return: min_age:int """ age_list = [item['age'] for item in beneficiary_dtls] min_age = min(age_list) return min_age def generate_token_OTP(mobile, request_header): """ This function generates an OTP and returns a new token """ if not mobile: print("Mobile number cannot be empty") os.system('pause') sys.exit() valid_token = False while not valid_token: try: data = {"mobile": mobile, "secret": "U2FsdGVkX1+z/4Nr9nta+2DrVJSv7KS6VoQUSQ1ZXYDx/CJUkWxFYG6P3iM/VW+6jLQ9RDQVzp/RcZ8kbT41xw==" } txnId = requests.post(url=OTP_PRO_URL, json=data, headers=request_header) if txnId.status_code == 200: print(f"Successfully requested OTP for mobile number {mobile} at {datetime.datetime.today()}..") txnId = txnId.json()['txnId'] OTP = input("Enter OTP (If this takes more than 2 minutes, press Enter to retry): ") if OTP: data = {"otp": sha256(str(OTP).encode('utf-8')).hexdigest(), "txnId": txnId} print("Validating OTP..") token = requests.post(url='https://cdn-api.co-vin.in/api/v2/auth/validateMobileOtp', json=data, headers=request_header) if token.status_code == 200: token = token.json()['token'] print(f'Token Generated: {token}') valid_token = True return token else:
print('Unable to Validate OTP') print(f"Response: {token.text}") retry = input(f"Retry with {mobile} ? (y/n Default y): ") retry = retry if retry else 'y' if retry == 'y': pass else: sys.exit() else: print('Unable to Generate OTP') print(txnId.status_code, txnId.text) retry = input(f"Retry with {mobile} ? (y/n Default y): ") retry = retry if retry else 'y' if retry == 'y': pass else: sys.exit() except Exception as e: print(str(e))
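# --- Illustration (not part of the script above) -----------------------------
# A toy payload showing how viable_options filters calendar responses; the
# field names mirror the CoWIN calendar structure consumed above, and the
# values are made up.
sample_resp = {
    'centers': [{
        'name': 'Sample PHC', 'district_name': 'Sample District',
        'pincode': 110001, 'center_id': 1, 'fee_type': 'Free',
        'sessions': [{
            'available_capacity': 5, 'min_age_limit': 18,
            'date': '01-06-2021', 'slots': ['09:00AM-11:00AM'],
            'session_id': 'abc-123',
        }],
    }]
}
options = viable_options(sample_resp, minimum_slots=2, min_age_booking=30,
                         fee_type=['Free', 'Paid'])
display_table(options)  # one row: capacity 5 >= 2 and age limit 18 <= 30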
import json from hashlib import sha256 from collections import Counter from inputimeout import inputimeout, TimeoutOccurred import tabulate, copy, time, datetime, requests, sys, os, random from captcha import captcha_builder BOOKING_URL = "https://cdn-api.co-vin.in/api/v2/appointment/schedule" BENEFICIARIES_URL = "https://cdn-api.co-vin.in/api/v2/appointment/beneficiaries" CALENDAR_URL_DISTRICT = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByDistrict?district_id={0}&date={1}" CALENDAR_URL_PINCODE = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/calendarByPin?pincode={0}&date={1}" CAPTCHA_URL = "https://cdn-api.co-vin.in/api/v2/auth/getRecaptcha" OTP_PUBLIC_URL = 'https://cdn-api.co-vin.in/api/v2/auth/public/generateOTP' OTP_PRO_URL = 'https://cdn-api.co-vin.in/api/v2/auth/generateMobileOTP' WARNING_BEEP_DURATION = (1000, 2000) try: import winsound except ImportError: import os if sys.platform == "darwin": def beep(freq, duration): # brew install SoX --> install SOund eXchange universal sound sample translator on mac os.system( f"play -n synth {duration/1000} sin {freq} >/dev/null 2>&1") else: def beep(freq, duration): # apt-get install beep --> install beep package on linux distros before running os.system('beep -f %s -l %s' % (freq, duration)) else: def beep(freq, duration): winsound.Beep(freq, duration) def viable_options(resp, minimum_slots, min_age_booking, fee_type): options = [] if len(resp['centers']) >= 0: for center in resp['centers']: for session in center['sessions']: if (session['available_capacity'] >= minimum_slots) \ and (session['min_age_limit'] <= min_age_booking)\ and (center['fee_type'] in fee_type): out = { 'name': center['name'], 'district': center['district_name'], 'pincode': center['pincode'], 'center_id': center['center_id'], 'available': session['available_capacity'], 'date': session['date'], 'slots': session['slots'], 'session_id': session['session_id'] } options.append(out) else: pass else: pass return options def display_table(dict_list): """ This function 1. Takes a list of dictionary 2. Add an Index column, and 3. Displays the data in tabular format """ header = ['idx'] + list(dict_list[0].keys()) rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)] print(tabulate.tabulate(rows, header, tablefmt='grid')) def display_info_dict(details): for key, value in details.items(): if isinstance(value, list): if all(isinstance(item, dict) for item in value): print(f"\t{key}:") display_table(value) else: print(f"\t{key}\t: {value}") else: print(f"\t{key}\t: {value}") def confirm_and_proceed(collected_details): print("\n================================= Confirm Info =================================\n") display_info_dict(collected_details) confirm = input("\nProceed with above info (y/n Default y) : ") confirm = confirm if confirm else 'y' if confirm != 'y': print("Details not confirmed. 
Exiting process.") os.system("pause") sys.exit() def save_user_info(filename, details): print("\n================================= Save Info =================================\n") save_info = input("Would you like to save this as a JSON file for easy use next time?: (y/n Default y): ") save_info = save_info if save_info else 'y' if save_info == 'y': with open(filename, 'w') as f: json.dump(details, f) print(f"Info saved to {filename} in {os.getcwd()}") def get_saved_user_info(filename): with open(filename, 'r') as f: data = json.load(f) return data def collect_user_details(request_header): # Get Beneficiaries print("Fetching registered beneficiaries.. ") beneficiary_dtls = get_beneficiaries(request_header) if len(beneficiary_dtls) == 0: print("There should be at least one beneficiary. Exiting.") os.system("pause") sys.exit(1) # Make sure all beneficiaries have the same type of vaccine vaccine_types = [beneficiary['vaccine'] for beneficiary in beneficiary_dtls] vaccines = Counter(vaccine_types) if len(vaccines.keys()) != 1: print(f"All beneficiaries in one attempt should have the same vaccine type. Found {len(vaccines.keys())}") os.system("pause") sys.exit(1) vaccine_type = vaccine_types[0] # if all([beneficiary['status'] == 'Partially Vaccinated' for beneficiary in beneficiary_dtls]) else None if not vaccine_type: print("\n================================= Vaccine Info =================================\n") vaccine_type = get_vaccine_preference() print("\n================================= Location Info =================================\n") # get search method to use search_option = input( """Search by Pincode? Or by State/District? \nEnter 1 for Pincode or 2 for State/District. (Default 2) : """) if not search_option or int(search_option) not in [1, 2]: search_option = 2 else: search_option = int(search_option) if search_option == 2: # Collect vaccination center preferance location_dtls = get_districts(request_header) else: # Collect vaccination center preferance location_dtls = get_pincodes() print("\n================================= Additional Info =================================\n") # Set filter condition minimum_slots = input(f'Filter out centers with availability less than ? Minimum {len(beneficiary_dtls)} : ') if minimum_slots: minimum_slots = int(minimum_slots) if int(minimum_slots) >= len(beneficiary_dtls) else len(beneficiary_dtls) else: minimum_slots = len(beneficiary_dtls) # Get refresh frequency refresh_freq = input('How often do you want to refresh the calendar (in seconds)? Default 5. Minimum 3. : ') refresh_freq = int(refresh_freq) if refresh_freq and int(refresh_freq) >= 3 else 5 # Get search start date start_date = input( '\nSearch for next seven day starting from when?\nUse 1 for today, 2 for tomorrow, or provide a date in the format DD-MM-YYYY. Default 2: ') if not start_date: start_date = 2 elif start_date in ['1', '2']: start_date = int(start_date) else: try: datetime.datetime.strptime(start_date, '%d-%m-%Y') except ValueError: print('Invalid Date! Proceeding with tomorrow.') start_date = 2 # Get preference of Free/Paid option fee_type = get_fee_type_preference() print("\n=========== CAUTION! =========== CAUTION! CAUTION! =============== CAUTION! =======\n") print("===== BE CAREFUL WITH THIS OPTION! AUTO-BOOKING WILL BOOK THE FIRST AVAILABLE CENTRE, DATE, AND A RANDOM SLOT! =====") auto_book = input("Do you want to enable auto-booking? 
(yes-please or no) Default no: ") auto_book = 'no' if not auto_book else auto_book collected_details = { 'beneficiary_dtls': beneficiary_dtls, 'location_dtls': location_dtls, 'search_option': search_option, 'minimum_slots': minimum_slots, 'refresh_freq': refresh_freq, 'auto_book': auto_book, 'start_date': start_date, 'vaccine_type': vaccine_type, 'fee_type': fee_type } return collected_details def check_calendar_by_district(request_header, vaccine_type, location_dtls, start_date, minimum_slots, min_age_booking, fee_type): """ This function 1. Takes details required to check vaccination calendar 2. Filters result by minimum number of slots available 3. Returns False if token is invalid 4. Returns list of vaccination centers & slots if available """ try: print('===================================================================================') today = datetime.datetime.today() base_url = CALENDAR_URL_DISTRICT if vaccine_type: base_url += f"&vaccine={vaccine_type}" options = [] for location in location_dtls: resp = requests.get(base_url.format(location['district_id'], start_date), headers=request_header) if resp.status_code == 401: print('TOKEN INVALID') return False elif resp.status_code == 200: resp = resp.json() if 'centers' in resp: print(f"Centers available in {location['district_name']} from {start_date} as of {today.strftime('%Y-%m-%d %H:%M:%S')}: {len(resp['centers'])}") options += viable_options(resp, minimum_slots, min_age_booking, fee_type) else: pass for location in location_dtls: if location['district_name'] in [option['district'] for option in options]: for _ in range(2): beep(location['alert_freq'], 150) return options except Exception as e: print(str(e)) beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1]) def check_calendar_by_pincode(request_header, vaccine_type, location_dtls, start_date, minimum_slots, min_age_booking, fee_type): """ This function 1. Takes details required to check vaccination calendar 2. Filters result by minimum number of slots available 3. Returns False if token is invalid 4. Returns list of vaccination centers & slots if available """ try: print('===================================================================================') today = datetime.datetime.today() base_url = CALENDAR_URL_PINCODE if vaccine_type: base_url += f"&vaccine={vaccine_type}" options = [] for location in location_dtls: resp = requests.get(base_url.format(location['pincode'], start_date), headers=request_header) if resp.status_code == 401: print('TOKEN INVALID') return False elif resp.status_code == 200: resp = resp.json() if 'centers' in resp: print(f"Centers available in {location['pincode']} from {start_date} as of {today.strftime('%Y-%m-%d %H:%M:%S')}: {len(resp['centers'])}") options += viable_options(resp, minimum_slots, min_age_booking, fee_type) else: pass for location in location_dtls: if int(location['pincode']) in [option['pincode'] for option in options]: for _ in range(2): beep(location['alert_freq'], 150) return options except Exception as e: print(str(e)) beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1]) def generate_captcha(request_header): print('================================= GETTING CAPTCHA ==================================================') resp = requests.post(CAPTCHA_URL, headers=request_header) print(f'Captcha Response Code: {resp.status_code}') if resp.status_code == 200: return captcha_builder(resp.json()) def book_appointment(request_header, details): """ This function 1. Takes details in json format 2. 
Attempts to book an appointment using the details 3. Returns True or False depending on Token Validity """ try: valid_captcha = True while valid_captcha: captcha = generate_captcha(request_header) details['captcha'] = captcha print('================================= ATTEMPTING BOOKING ==================================================') resp = requests.post(BOOKING_URL, headers=request_header, json=details) print(f'Booking Response Code: {resp.status_code}') print(f'Booking Response : {resp.text}') if resp.status_code == 401: print('TOKEN INVALID') return False elif resp.status_code == 200: beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1]) print('############## BOOKED! ############################ BOOKED! ##############') print(" Hey, Hey, Hey! It's your lucky day! ") print('\nPress any key thrice to exit program.') os.system("pause") os.system("pause") os.system("pause") sys.exit() elif resp.status_code == 400: print(f'Response: {resp.status_code} : {resp.text}') pass else: print(f'Response: {resp.status_code} : {resp.text}') return True except Exception as e: print(str(e)) beep(WARNING_BEEP_DURATION[0], WARNING_BEEP_DURATION[1]) def check_and_book(request_header, beneficiary_dtls, location_dtls, search_option, **kwargs): """ This function 1. Checks the vaccination calendar for available slots, 2. Lists all viable options, 3. Takes user's choice of vaccination center and slot, 4. Calls function to book appointment, and 5. Returns True or False depending on Token Validity """ try: min_age_booking = get_min_age(beneficiary_dtls) minimum_slots = kwargs['min_slots'] refresh_freq = kwargs['ref_freq'] auto_book = kwargs['auto_book'] start_date = kwargs['start_date'] vaccine_type = kwargs['vaccine_type'] fee_type = kwargs['fee_type'] if isinstance(start_date, int) and start_date == 2: start_date = (datetime.datetime.today() + datetime.timedelta(days=1)).strftime("%d-%m-%Y") elif isinstance(start_date, int) and start_date == 1: start_date = datetime.datetime.today().strftime("%d-%m-%Y") else: pass if search_option == 2: options = check_calendar_by_district(request_header, vaccine_type, location_dtls, start_date, minimum_slots, min_age_booking, fee_type) else: options = check_calendar_by_pincode(request_header, vaccine_type, location_dtls, start_date, minimum_slots, min_age_booking, fee_type) if isinstance(options, bool): return False options = sorted(options, key=lambda k: (k['district'].lower(), k['pincode'], k['name'].lower(), datetime.datetime.strptime(k['date'], "%d-%m-%Y")) ) tmp_options = copy.deepcopy(options) if len(tmp_options) > 0: cleaned_options_for_display = [] for item in tmp_options: item.pop('session_id', None) item.pop('center_id', None) cleaned_options_for_display.append(item) display_table(cleaned_options_for_display) if auto_book == 'yes-please': print("AUTO-BOOKING IS ENABLED. PROCEEDING WITH FIRST CENTRE, DATE, and RANDOM SLOT.") option = options[0] random_slot = random.randint(1, len(option['slots'])) choice = f'1.{random_slot}' else: choice = inputimeout( prompt='----------> Wait 20 seconds for updated options OR \n----------> Enter a choice e.g: 1.4 for (1st center 4th slot): ', timeout=20) else: for i in range(refresh_freq, 0, -1): msg = f"No viable options. Next update in {i} seconds.." print(msg, end="\r", flush=True) sys.stdout.flush() time.sleep(1) choice = '.' 
except TimeoutOccurred: time.sleep(1) return True else: if choice == '.': return True else: try: choice = choice.split('.') choice = [int(item) for item in choice] print(f'============> Got Choice: Center #{choice[0]}, Slot #{choice[1]}') new_req = { 'beneficiaries': [beneficiary['bref_id'] for beneficiary in beneficiary_dtls], 'dose': 2 if [beneficiary['status'] for beneficiary in beneficiary_dtls][0] == 'Partially Vaccinated' else 1, 'center_id' : options[choice[0] - 1]['center_id'], 'session_id': options[choice[0] - 1]['session_id'], 'slot' : options[choice[0] - 1]['slots'][choice[1] - 1] } print(f'Booking with info: {new_req}') return book_appointment(request_header, new_req) except IndexError: print("============> Invalid Option!") os.system("pause") pass def get_vaccine_preference(): print("It seems you're trying to find a slot for your first dose. Do you have a vaccine preference?") preference = input("Enter 0 for No Preference, 1 for COVISHIELD, or 2 for COVAXIN. Default 0 : ") preference = int(preference) if preference and int(preference) in [0, 1, 2] else 0 if preference == 1: return 'COVISHIELD' elif preference == 2: return 'COVAXIN' else: return None def get_fee_type_preference(): print("\nDo you have a fee type preference?") preference = input("Enter 0 for No Preference, 1 for Free Only, or 2 for Paid Only. Default 0 : ") preference = int(preference) if preference and int(preference) in [0, 1, 2] else 0 if preference == 1: return ['Free'] elif preference == 2: return ['Paid'] else: return ['Free', 'Paid'] def get_pincodes(): locations = [] pincodes = input("Enter comma separated pincodes to monitor: ") for idx, pincode in enumerate(pincodes.split(',')): pincode = { 'pincode': pincode, 'alert_freq': 440 + ((2 * idx) * 110) } locations.append(pincode) return locations def get_districts(request_header): """ This function 1. Lists all states, prompts to select one, 2. Lists all districts in that state, prompts to select required ones, and 3. Returns the list of districts as list(dict) """ states = requests.get('https://cdn-api.co-vin.in/api/v2/admin/location/states', headers=request_header) if states.status_code == 200: states = states.json()['states'] refined_states = [] for state in states: tmp = {'state': state['state_name']} refined_states.append(tmp) display_table(refined_states) state = int(input('\nEnter State index: ')) state_id = states[state - 1]['state_id'] districts = requests.get(f'https://cdn-api.co-vin.in/api/v2/admin/location/districts/{state_id}', headers=request_header) if districts.status_code == 200: districts = districts.json()['districts'] refined_districts = [] for district in districts: tmp = {'district': district['district_name']} refined_districts.append(tmp) display_table(refined_districts) reqd_districts = input('\nEnter comma separated index numbers of districts to monitor : ') districts_idx = [int(idx) - 1 for idx in reqd_districts.split(',')] reqd_districts = [{ 'district_id': item['district_id'], 'district_name': item['district_name'], 'alert_freq': 440 + ((2 * idx) * 110) } for idx, item in enumerate(districts) if idx in districts_idx] print(f'Selected districts: ') display_table(reqd_districts) return reqd_districts else: print('Unable to fetch districts') print(districts.status_code) print(districts.text) os.system("pause") sys.exit(1) else: print('Unable to fetch states') print(states.status_code) print(states.text) os.system("pause") sys.exit(1) def get_beneficiaries(request_header): """ This function 1. 
Fetches all beneficiaries registered under the mobile number, 2. Prompts user to select the applicable beneficiaries, and 3. Returns the list of beneficiaries as list(dict) """ beneficiaries = requests.get(BENEFICIARIES_URL, headers=request_header) if beneficiaries.status_code == 200: beneficiaries = beneficiaries.json()['beneficiaries'] refined_beneficiaries = [] for beneficiary in beneficiaries: beneficiary['age'] = datetime.datetime.today().year - int(beneficiary['birth_year']) tmp = { 'bref_id': beneficiary['beneficiary_reference_id'], 'name': beneficiary['name'], 'vaccine': beneficiary['vaccine'], 'age': beneficiary['age'], 'status': beneficiary['vaccination_status'] } refined_beneficiaries.append(tmp) display_table(refined_beneficiaries) print(""" ################# IMPORTANT NOTES ################# # 1. While selecting beneficiaries, make sure that selected beneficiaries are all taking the same dose: either first OR second. # Please do not try to club together booking for first dose for one beneficiary and second dose for another beneficiary. # # 2. While selecting beneficiaries, also make sure that beneficiaries selected for second dose are all taking the same vaccine: COVISHIELD OR COVAXIN. # Please do not try to club together booking for beneficiary taking COVISHIELD with beneficiary taking COVAXIN. # # 3. If you're selecting multiple beneficiaries, make sure all are of the same age group (45+ or 18+) as defined by the govt. # Please do not try to club together booking for younger and older beneficiaries. ################################################### """) reqd_beneficiaries = input('Enter comma separated index numbers of beneficiaries to book for : ') beneficiary_idx = [int(idx) - 1 for idx in reqd_beneficiaries.split(',')] reqd_beneficiaries = [{ 'bref_id': item['beneficiary_reference_id'], 'name': item['name'], 'vaccine': item['vaccine'], 'age': item['age'], 'status': item['vaccination_status'] } for idx, item in enumerate(beneficiaries) if idx in beneficiary_idx] print('Selected beneficiaries: ') display_table(reqd_beneficiaries) return reqd_beneficiaries else: print('Unable to fetch beneficiaries') print(beneficiaries.status_code) print(beneficiaries.text) os.system("pause") return [] def get_min_age(beneficiary_dtls): """ This function returns a min age argument, based on age of all beneficiaries :param beneficiary_dtls: :return: min_age:int """ age_list = [item['age'] for item in beneficiary_dtls] min_age = min(age_list) return min_age def generate_token_OTP(mobile, request_header): """ This function generates an OTP and returns a new token """ if not mobile: print("Mobile number cannot be empty") os.system('pause') sys.exit() valid_token = False while not valid_token: try: data = {"mobile": mobile, "secret": "U2FsdGVkX1+z/4Nr9nta+2DrVJSv7KS6VoQUSQ1ZXYDx/CJUkWxFYG6P3iM/VW+6jLQ9RDQVzp/RcZ8kbT41xw==" } txnId = requests.post(url=OTP_PRO_URL, json=data, headers=request_header) if txnId.status_code == 200: print(f"Successfully requested OTP for mobile number {mobile} at {datetime.datetime.today()}..") txnId = txnId.json()['txnId'] OTP = input("Enter OTP (If this takes more than 2 minutes, press Enter to retry): ") if OTP: data = {"otp": sha256(str(OTP).encode('utf-8')).hexdigest(), "txnId": txnId} print("Validating OTP..") token = requests.post(url='https://cdn-api.co-vin.in/api/v2/auth/validateMobileOtp', json=data, headers=request_header) if token.status_code == 200: token = token.json()['token'] print(f'Token Generated: {token}') valid_token = True return token else:
print('Unable to Validate OTP') print(f"Response: {token.text}") retry = input(f"Retry with {mobile}? (y/n Default y): ") retry = retry if retry else 'y' if retry.lower() != 'y': sys.exit() else: print('Unable to Generate OTP') print(txnId.status_code, txnId.text) retry = input(f"Retry with {mobile}? (y/n Default y): ") retry = retry if retry else 'y' if retry.lower() != 'y': sys.exit() except Exception as e: print(str(e)) time.sleep(1)
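For reference, a minimal standalone sketch of the "center.slot" choice handling used in the booking prompt above. The helper name parse_choice and the sample options are illustrative only (the script does this parsing inline); the error handling mirrors its except IndexError path.

from typing import Optional

def parse_choice(choice: str, options: list) -> Optional[dict]:
    # Parse a "center.slot" reply such as "1.2" against the options table.
    try:
        center_idx, slot_idx = (int(part) for part in choice.split('.'))
        if center_idx < 1 or slot_idx < 1:
            return None  # avoid accidental negative indexing
        option = options[center_idx - 1]
        return {
            'center_id': option['center_id'],
            'session_id': option['session_id'],
            'slot': option['slots'][slot_idx - 1],
        }
    except (ValueError, IndexError):
        return None  # malformed reply or out-of-range center/slot

# Hypothetical options, shaped like the entries the script builds:
options = [
    {'center_id': 11, 'session_id': 'a', 'slots': ['09:00-11:00']},
    {'center_id': 42, 'session_id': 'b', 'slots': ['11:00-13:00', '13:00-15:00']},
]
assert parse_choice('2.1', options) == {'center_id': 42, 'session_id': 'b', 'slot': '11:00-13:00'}
assert parse_choice('9.9', options) is None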
from keyword import iskeyword from collections import defaultdict from django.apps import apps from django.apps.config import MODELS_MODULE_NAME from django.db.models.fields.files import ImageField from .exceptions import InvalidKeyError, InvalidValueError from .const import CONFIG_NAME, nonsense_values_together from .validator_types import validation_rule_factory class VimageKey: def __init__(self, key): """ ``key`` must be one of the following: - ``'app_label.models'`` - ``'app_label.models.MyModel'`` - ``'app_label.models.MyModel.field'`` :param str key: dotted path to models/model/ImageField """ if not isinstance(key, str): raise TypeError(f'Each {CONFIG_NAME} dict key should be a ' f'<str> type. Current key: "{key}", is ' f'<{type(key).__name__}>!') self.key = key def __str__(self): return self.key def __repr__(self): return f'{self.__class__.__name__}({self.key!r})' def split_key(self): return self.key.split('.') @staticmethod def models_in_key(keywords): """ Whether ``django.apps.config.MODELS_MODULE_NAME`` word is the second word in the dotted key. :return: boolean """ return len(keywords) > 1 and keywords[1] == MODELS_MODULE_NAME @staticmethod def valid_key_length(keywords): """ Whether length of ``keywords`` is between 2 and 4 (inclusive) :param list keywords: list of strings :return: boolean """ return len(keywords) in range(2, 5) def key_non_empty_str(self): """ Whether ``self.key`` is a non-empty str. :return: boolean """ return isinstance(self.key, str) and bool(self.key) def validate_dotted_key(self): """ Assumes that ``self.key`` is a str. Whether the key defined, is a valid python dotted path to one of the following: - ``'app_label.models'`` - ``'app_label.models.MyModel'`` - ``'app_label.models.MyModel.ImageField_field'`` :return: boolean """ start = f'[{self.key}]:' keywords = self.split_key() if not self.valid_key_length(keywords): err = f'{start} The key must consists of two to four words, ' \ f'separated by dot. It must be a path to one of ' \ f'the following: the "models" module, ' \ f'a Django Model class or a Django ImageField field.' raise InvalidKeyError(err) if not self.models_in_key(keywords): err = f'{start} The second word of the key, should be ' \ f'"{MODELS_MODULE_NAME}", not "{keywords[1]}"!' raise InvalidKeyError(err) app_label = keywords[0] try: app_config = apps.get_app_config(app_label=app_label) models_module = app_config.models_module.__name__ # noqa: F841 except LookupError: err = f'{start} The app "{app_label}" is either not in ' \ f'"INSTALLED_APPS" or it does not exist!' raise InvalidKeyError(err) except AttributeError: err = f'{start} The app "{app_label}" has no "models" ' \ f'module defined. Are you sure it exists?' raise InvalidKeyError(err) else: # By now the app exists and has a "models" module if len(keywords) == 2: return keywords = keywords[2:] # by now keywords should be at least ['<ModelName>'] model_classes = list(app_config.get_models()) model_names = [m.__name__ for m in model_classes] model_name = keywords.pop(0) if model_name not in model_names: err = f'{start} The model "{model_name}" does not exist! ' \ f'Available model names: "{', '.join(model_names)}".' 
raise InvalidKeyError(err) # by now keywords should be just the field's name ['<ImageField>'] if len(keywords) == 1: image_field_names = [ field.name for model in model_classes for field in model._meta.get_fields() if isinstance(field, ImageField) ] field_name = keywords.pop(0) if field_name not in image_field_names: err = f'{start} The field "{field_name}" does not ' \ f'exist! Available ImageField names: ' \ f'"{', '.join(image_field_names)}".' raise InvalidKeyError(err) def key_valid_dotted_format(self): """ Whether ``self.key`` is a syntactically correct python dotted path. Valid: ``'path.to.a.package.or.module'`` Invalid: ``'path/to//module', 'path,to,,module', 'path,to..module'`` :return: boolean """ split_path = self.split_key() return all([ True if word.isidentifier() and not iskeyword(word) else False for word in split_path ]) def validate_key(self): if not self.key_valid_dotted_format(): err = f'The key "{self.key}" is not a valid python dotted path ' \ f'(words separated with the "." dot character). ' \ f'Please check for any typos!' raise InvalidKeyError(err) self.validate_dotted_key() def get_app_img_fields(self): """ Calculates and returns all the :class:`~django.db.models.ImageField` fields of the app. :return: list of Django ``ImageField`` objects """ keywords = self.split_key() app_config = apps.get_app_config(app_label=keywords[0]) app_models = app_config.get_models() return [ field for model in app_models for field in model._meta.get_fields() if isinstance(field, ImageField) ] def get_specific_model_img_fields(self): """ Assumes that ``self.key`` contains, at least, a model name (class). Calculates and returns all the :class:`~django.db.models.ImageField` fields of the specified Model. :return: list of Django ``ImageField`` objects """ keywords = self.split_key() model = apps.get_model(app_label=keywords[0], model_name=keywords[-1]) return [ field for field in model._meta.get_fields() if isinstance(field, ImageField) ] def get_img_field(self): """ Assumes that ``self.key`` contains the :class:`~django.db.models.ImageField` field. Finds and returns the specified ``ImageField`` field as a list. :return: list with one element (``ImageField`` object) """ keywords = self.split_key() model = apps.get_model(app_label=keywords[0], model_name=keywords[-2]) return [model._meta.get_field(keywords[-1])] def get_specificity(self): """ Assumes that the ``self.key`` is valid. Calculates the specificity of the key. The higher the specificity, the higher the precedence it takes over, an other, same key. =============================== ===================== KEY SPECIFICITY =============================== ===================== ``'app.models'`` 1 ``'app.models.MyModel'`` 2 ``'app.models.MyModel.field'`` 3 =============================== ===================== :return: int """ keywords_len = len(self.split_key()) if keywords_len not in range(2, 5): return 0 return keywords_len - 1 def get_app_label(self): """ Returns the ``app_label`` of this key. Example:: key = 'myapp.models.MyModel' self.get_app_label() == 'myapp' :return: str, the app label of the key """ return self.split_key()[0] def get_fields(self): """ Assumes that ``self.key`` is valid. Returns a list of :class:`~django.db.models.ImageField` fields this key affects. 
:return: list """ specificity = self.get_specificity() if specificity: if specificity == 1: return self.get_app_img_fields() elif specificity == 2: return self.get_specific_model_img_fields() else: return self.get_img_field() return [] def is_valid(self): """ Whether ``self.key`` is valid. :return: None or raises ``InvalidKeyError`` """ if not self.key_non_empty_str(): err = f'The key "{self.key}" should be a non-empty string. It ' \ f'must be the dotted path to the app\'s "models" module ' \ f'or a "Model" class or an "ImageField" field.' raise InvalidKeyError(err) self.validate_key() class VimageValue: def __init__(self, value): """ Initialize with each value of the ``VIMAGE`` dict setting. :param dict value: dict """ if not isinstance(value, dict): raise TypeError(f'Each {CONFIG_NAME} dict value should be a ' f'<dict> type. Current value: "{value}", is ' f'<{type(value).__name__}>!') self.value = value self.validators = [] def __str__(self): return str(self.value) def __repr__(self): return f'{self.__class__.__name__}({self.value!r})' def validation_rule_generator(self): """ Depending on the key:value pair of ``self.value``, returns the corresponding class instance of this validation type. :return: class instance """ for key, value in self.value.items(): yield validation_rule_factory(key, value) def validate_value(self): """ Whether the value (not the key) of the validation rule is valid. :return: None or raises ``InvalidValueError`` """ for vr in self.validation_rule_generator(): vr.is_valid() def nonsense_keys_together(self): """ There is no sense of declaring, in the same rule, ``'DIMENSIONS'`` and ``'ASPECT_RATIO'``. These values are mutually exclusive. :return: None or raises ``InvalidValueError`` """ set_keys = set(self.value.keys()) for nonsense_value_together in nonsense_values_together(): if nonsense_value_together.issubset(set_keys): err = f'The value "{self.value}" contains nonsense values ' \ f'that together will not work! Use one of these.' \ f'Nonsense values together: ' \ f'"{', '.join(nonsense_value_together)}"' raise InvalidValueError(err) def type_validator_mapping(self): """ Given a set of rules (key: value pair), return a key: value entry with the name of the validation type as the key and the validator itself (callable) as the value. Example:: # if self.value == {'SIZE', 100} self.type_validator_mapping() == { 'SIZE': <validator_function>, } :return: dict """ return { vr.name: vr.generate_validator() for vr in self.validation_rule_generator() } def is_valid(self): """ Whether ``self.value`` is syntactically correct. :return: None or raises ``InvalidValueError`` """ if self.value == {}: err = f'The value "{self.value}" should be a non-empty dict ' \ f'with the proper validation rules. ' \ f'Please check the documentation for more information.' raise InvalidValueError(err) # check if nonsense keys (inside the self.value dict) are defined self.nonsense_keys_together() # keys are valid, proceed with self.value values self.validate_value() class VimageEntry: def __init__(self, key, value): """ The "entry" is a wrapper of :class:`~vimage.core.base.VimageKey` and :class:`~vimage.core.base.VimageValue` classes. It accepts a str (as the ``VimageKey``) and a dict (as the ``VimageValue``). 
Example:: { 'myapp.models.MyModel': { 'SIZE': 50, 'DIMENSIONS': (1000, 1000), } } :param str key: the key, str, of the config dictionary :param dict value: the value, dict, of the config dictionary """ self.k = key self.v = value self.key = VimageKey(self.k) self.value = VimageValue(self.v) def __str__(self): return f'{self.key}: {self.value}' def __repr__(self): return f'{self.__class__.__name__}({self.k!r}, {self.v!r})' def is_valid(self): """ Checks if both ``self.key`` and ``self.value`` are syntactically correct. :return: None or raises ``InvalidKeyError`` or ``InvalidValueError`` """ self.key.is_valid() self.value.is_valid() @property def app_label(self): """ Calls :meth:`~vimage.core.base.VimageKey.get_app_label` :return: str """ return self.key.get_app_label() @property def fields(self): """ Calls :meth:`~vimage.core.base.VimageKey.get_fields` :return: list """ return self.key.get_fields() @property def specificity(self): """ Calls :meth:`~vimage.core.base.VimageKey.get_specificity` :return: int """ return self.key.get_specificity() @property def mapping(self): """ Calls :meth:`~vimage.core.base.VimageValue.type_validator_mapping` :return: dict """ return self.value.type_validator_mapping() @property def entry_info(self): """ Provides the ``app_label``, ``specificity``, ``fields`` and a ``mapping`` between validator type's name and the validator itself:: # If "self.entry" is: { 'myapp.models.MyModel': { 'SIZE': 120, 'DIMENSIONS': (500, 500), } } # the return value from this method, would be: { 'app_label': 'my_app', 'specificity': 2, 'fields': [<ImageField1>, <ImageField2>, ...], 'mapping': { 'SIZE': <ValidationRuleSize.validator>, 'DIMENSIONS': <ValidationRuleDimensions.validator>, } } :return: dict of valuable information about this entry """ return { 'app_label': self.app_label, 'specificity': self.specificity, 'fields': self.fields, 'mapping': self.mapping, } class VimageConfig: def __init__(self, config): """ Initialize with the ``VIMAGE`` dict, defined in settings. :param dict config: the whole configuration VIMAGE dict setting """ self.config = config def __str__(self): return str(self.config) def __repr__(self): return f'{self.__class__.__name__}({self.config!r})' def vimage_entry_generator(self): """ For each key-value pair in the ``VIMAGE`` dict, yields a :class:`~vimage.core.base.VimageEntry` instance. :return: generator """ for key, value in self.config.items(): yield VimageEntry(key, value) def build_info(self): """ Given each ``VimageEntry`` object, build a dict with ``app_label`` as the key and ``VimageEntry.entry_info`` dict as the value. In other words, put entries that belong to the same app, inside a single *bucket*. Next step, is to sort, each bucket, by specificity. Return example:: { 'myapp': [ { 'app_label': 'myapp', 'fields': [<ImageField1>, <ImageField2>, ...], 'specificity': 2, 'mapping': { 'SIZE': <ValidationRuleSize.validator>, 'FORMAT': <ValidationRuleFormat.validator> }, }, { 'app_label': 'myapp', 'fields': [<ImageField1>, <ImageField2>, ...], 'specificity': 1, 'mapping': { 'SIZE': <ValidationRuleSize.validator> }, } ], 'myapp2': [ { 'app_label': 'myapp2', 'fields': [<ImageField1>], 'specificity': 3, 'mapping': { 'FORMAT': <ValidationRuleFormat.validator> } }, ], ... } :return: dict """ info = defaultdict(list) for vimage_entry in self.vimage_entry_generator(): entry_info = vimage_entry.entry_info info[entry_info['app_label']].append(entry_info) return info @staticmethod def sort_info(info): """ Sorts the list inside each bucket per specificity. 
Lower specificity comes first. :param dict info: dict of useful info about the VimageEntry :return: dict """ for value in info.values(): value.sort(key=lambda d: d['specificity']) return info @staticmethod def build_draft_registry(info): """ Builds the draft registry, dict, in the following pattern:: { <field1>: {'validator_name': <validator>, ...} <field2>: {'validator_name': <validator>, ...} } Think of the ``draft_registry`` as a pre-build of the ``self.registry``. Because each "info bucket" is sorted by specificity, the first "info" element in each "bucket" will always be a subset of the next one. If all elements inside the bucket have the same specificity then they will refer to different fields, thus, different keys inside ``self.draft_registry``. If a "bucket list" has three different elements with 1, 2, 3 specificity values, respectively, then the specific ``ImageField`` of the last element (specificity == 3) will override any given validators from the previous elements (specificity in [1, 2]). :param dict info: dict of useful info about the VimageEntry :return: dict """ draft_registry = {} # info is a DefaultDict <str>: <list> for values in info.values(): # values == list of one or more dicts (see "_build_info" docstring) for d in values: # each d is a dict for field in d['fields']: mapping = d['mapping'] if field not in draft_registry: # New field for validation. Add it. # Extra caution here due to reference by name. # A new dict (with new id) must be added to each field. draft_registry[field] = { k: v for k, v in mapping.items() } else: # update/insert validator to existing field for k, v in mapping.items(): draft_registry[field][k] = v return draft_registry @staticmethod def build_registry(draft_registry): """ Given the ``draft_registry`` dict, construct the main registry which is the basis of adding the validators into each ``ImageField``. :param dict draft_registry: a helper dict to help build the registry :return: dict """ registry = {} for field, mapping in draft_registry.items(): registry[field] = list(mapping.values()) return registry def add_validators(self): """ The entry point where the ``draft_registry`` and the ``registry`` are build and, finally, the validators are added to the corresponding ``ImageField`` fields. :return: None """ info = self.build_info() sorted_info = self.sort_info(info) draft_registry = self.build_draft_registry(sorted_info) registry = self.build_registry(draft_registry) for field, validators in registry.items(): field.validators += validators
from keyword import iskeyword from collections import defaultdict from django.apps import apps from django.apps.config import MODELS_MODULE_NAME from django.db.models.fields.files import ImageField from .exceptions import InvalidKeyError, InvalidValueError from .const import CONFIG_NAME, nonsense_values_together from .validator_types import validation_rule_factory class VimageKey: def __init__(self, key): """ ``key`` must be one of the following: - ``'app_label.models'`` - ``'app_label.models.MyModel'`` - ``'app_label.models.MyModel.field'`` :param str key: dotted path to models/model/ImageField """ if not isinstance(key, str): raise TypeError(f'Each {CONFIG_NAME} dict key should be a ' f'<str> type. Current key: "{key}", is ' f'<{type(key).__name__}>!') self.key = key def __str__(self): return self.key def __repr__(self): return f'{self.__class__.__name__}({self.key!r})' def split_key(self): return self.key.split('.') @staticmethod def models_in_key(keywords): """ Whether ``django.apps.config.MODELS_MODULE_NAME`` word is the second word in the dotted key. :return: boolean """ return len(keywords) > 1 and keywords[1] == MODELS_MODULE_NAME @staticmethod def valid_key_length(keywords): """ Whether length of ``keywords`` is between 2 and 4 (inclusive) :param list keywords: list of strings :return: boolean """ return len(keywords) in range(2, 5) def key_non_empty_str(self): """ Whether ``self.key`` is a non-empty str. :return: boolean """ return isinstance(self.key, str) and bool(self.key) def validate_dotted_key(self): """ Assumes that ``self.key`` is a str. Whether the key defined, is a valid python dotted path to one of the following: - ``'app_label.models'`` - ``'app_label.models.MyModel'`` - ``'app_label.models.MyModel.ImageField_field'`` :return: boolean """ start = f'[{self.key}]:' keywords = self.split_key() if not self.valid_key_length(keywords): err = f'{start} The key must consists of two to four words, ' \ f'separated by dot. It must be a path to one of ' \ f'the following: the "models" module, ' \ f'a Django Model class or a Django ImageField field.' raise InvalidKeyError(err) if not self.models_in_key(keywords): err = f'{start} The second word of the key, should be ' \ f'"{MODELS_MODULE_NAME}", not "{keywords[1]}"!' raise InvalidKeyError(err) app_label = keywords[0] try: app_config = apps.get_app_config(app_label=app_label) models_module = app_config.models_module.__name__ # noqa: F841 except LookupError: err = f'{start} The app "{app_label}" is either not in ' \ f'"INSTALLED_APPS" or it does not exist!' raise InvalidKeyError(err) except AttributeError: err = f'{start} The app "{app_label}" has no "models" ' \ f'module defined. Are you sure it exists?' raise InvalidKeyError(err) else: # By now the app exists and has a "models" module if len(keywords) == 2: return keywords = keywords[2:] # by now keywords should be at least ['<ModelName>'] model_classes = list(app_config.get_models()) model_names = [m.__name__ for m in model_classes] model_name = keywords.pop(0) if model_name not in model_names: err = f'{start} The model "{model_name}" does not exist! ' \ f'Available model names: "{", ".join(model_names)}".' 
raise InvalidKeyError(err) # by now keywords should be just the field's name ['<ImageField>'] if len(keywords) == 1: image_field_names = [ field.name for model in model_classes for field in model._meta.get_fields() if isinstance(field, ImageField) ] field_name = keywords.pop(0) if field_name not in image_field_names: err = f'{start} The field "{field_name}" does not ' \ f'exist! Available ImageField names: ' \ f'"{", ".join(image_field_names)}".' raise InvalidKeyError(err) def key_valid_dotted_format(self): """ Whether ``self.key`` is a syntactically correct python dotted path. Valid: ``'path.to.a.package.or.module'`` Invalid: ``'path/to//module', 'path,to,,module', 'path,to..module'`` :return: boolean """ split_path = self.split_key() return all([ True if word.isidentifier() and not iskeyword(word) else False for word in split_path ]) def validate_key(self): if not self.key_valid_dotted_format(): err = f'The key "{self.key}" is not a valid python dotted path ' \ f'(words separated with the "." dot character). ' \ f'Please check for any typos!' raise InvalidKeyError(err) self.validate_dotted_key() def get_app_img_fields(self): """ Calculates and returns all the :class:`~django.db.models.ImageField` fields of the app. :return: list of Django ``ImageField`` objects """ keywords = self.split_key() app_config = apps.get_app_config(app_label=keywords[0]) app_models = app_config.get_models() return [ field for model in app_models for field in model._meta.get_fields() if isinstance(field, ImageField) ] def get_specific_model_img_fields(self): """ Assumes that ``self.key`` contains, at least, a model name (class). Calculates and returns all the :class:`~django.db.models.ImageField` fields of the specified Model. :return: list of Django ``ImageField`` objects """ keywords = self.split_key() model = apps.get_model(app_label=keywords[0], model_name=keywords[-1]) return [ field for field in model._meta.get_fields() if isinstance(field, ImageField) ] def get_img_field(self): """ Assumes that ``self.key`` contains the :class:`~django.db.models.ImageField` field. Finds and returns the specified ``ImageField`` field as a list. :return: list with one element (``ImageField`` object) """ keywords = self.split_key() model = apps.get_model(app_label=keywords[0], model_name=keywords[-2]) return [model._meta.get_field(keywords[-1])] def get_specificity(self): """ Assumes that the ``self.key`` is valid. Calculates the specificity of the key. The higher the specificity, the higher the precedence it takes over, an other, same key. =============================== ===================== KEY SPECIFICITY =============================== ===================== ``'app.models'`` 1 ``'app.models.MyModel'`` 2 ``'app.models.MyModel.field'`` 3 =============================== ===================== :return: int """ keywords_len = len(self.split_key()) if keywords_len not in range(2, 5): return 0 return keywords_len - 1 def get_app_label(self): """ Returns the ``app_label`` of this key. Example:: key = 'myapp.models.MyModel' self.get_app_label() == 'myapp' :return: str, the app label of the key """ return self.split_key()[0] def get_fields(self): """ Assumes that ``self.key`` is valid. Returns a list of :class:`~django.db.models.ImageField` fields this key affects. 
:return: list """ specificity = self.get_specificity() if specificity: if specificity == 1: return self.get_app_img_fields() elif specificity == 2: return self.get_specific_model_img_fields() else: return self.get_img_field() return [] def is_valid(self): """ Whether ``self.key`` is valid. :return: None or raises ``InvalidKeyError`` """ if not self.key_non_empty_str(): err = f'The key "{self.key}" should be a non-empty string. It ' \ f'must be the dotted path to the app\'s "models" module ' \ f'or a "Model" class or an "ImageField" field.' raise InvalidKeyError(err) self.validate_key() class VimageValue: def __init__(self, value): """ Initialize with each value of the ``VIMAGE`` dict setting. :param dict value: dict """ if not isinstance(value, dict): raise TypeError(f'Each {CONFIG_NAME} dict value should be a ' f'<dict> type. Current value: "{value}", is ' f'<{type(value).__name__}>!') self.value = value self.validators = [] def __str__(self): return str(self.value) def __repr__(self): return f'{self.__class__.__name__}({self.value!r})' def validation_rule_generator(self): """ Depending on the key:value pair of ``self.value``, returns the corresponding class instance of this validation type. :return: class instance """ for key, value in self.value.items(): yield validation_rule_factory(key, value) def validate_value(self): """ Whether the value (not the key) of the validation rule is valid. :return: None or raises ``InvalidValueError`` """ for vr in self.validation_rule_generator(): vr.is_valid() def nonsense_keys_together(self): """ There is no sense of declaring, in the same rule, ``'DIMENSIONS'`` and ``'ASPECT_RATIO'``. These values are mutually exclusive. :return: None or raises ``InvalidValueError`` """ set_keys = set(self.value.keys()) for nonsense_value_together in nonsense_values_together(): if nonsense_value_together.issubset(set_keys): err = f'The value "{self.value}" contains nonsense values ' \ f'that together will not work! Use one of these.' \ f'Nonsense values together: ' \ f'"{", ".join(nonsense_value_together)}"' raise InvalidValueError(err) def type_validator_mapping(self): """ Given a set of rules (key: value pair), return a key: value entry with the name of the validation type as the key and the validator itself (callable) as the value. Example:: # if self.value == {'SIZE', 100} self.type_validator_mapping() == { 'SIZE': <validator_function>, } :return: dict """ return { vr.name: vr.generate_validator() for vr in self.validation_rule_generator() } def is_valid(self): """ Whether ``self.value`` is syntactically correct. :return: None or raises ``InvalidValueError`` """ if self.value == {}: err = f'The value "{self.value}" should be a non-empty dict ' \ f'with the proper validation rules. ' \ f'Please check the documentation for more information.' raise InvalidValueError(err) # check if nonsense keys (inside the self.value dict) are defined self.nonsense_keys_together() # keys are valid, proceed with self.value values self.validate_value() class VimageEntry: def __init__(self, key, value): """ The "entry" is a wrapper of :class:`~vimage.core.base.VimageKey` and :class:`~vimage.core.base.VimageValue` classes. It accepts a str (as the ``VimageKey``) and a dict (as the ``VimageValue``). 
Example:: { 'myapp.models.MyModel': { 'SIZE': 50, 'DIMENSIONS': (1000, 1000), } } :param str key: the key, str, of the config dictionary :param dict value: the value, dict, of the config dictionary """ self.k = key self.v = value self.key = VimageKey(self.k) self.value = VimageValue(self.v) def __str__(self): return f'{self.key}: {self.value}' def __repr__(self): return f'{self.__class__.__name__}({self.k!r}, {self.v!r})' def is_valid(self): """ Checks if both ``self.key`` and ``self.value`` are syntactically correct. :return: None or raises ``InvalidKeyError`` or ``InvalidValueError`` """ self.key.is_valid() self.value.is_valid() @property def app_label(self): """ Calls :meth:`~vimage.core.base.VimageKey.get_app_label` :return: str """ return self.key.get_app_label() @property def fields(self): """ Calls :meth:`~vimage.core.base.VimageKey.get_fields` :return: list """ return self.key.get_fields() @property def specificity(self): """ Calls :meth:`~vimage.core.base.VimageKey.get_specificity` :return: int """ return self.key.get_specificity() @property def mapping(self): """ Calls :meth:`~vimage.core.base.VimageValue.type_validator_mapping` :return: dict """ return self.value.type_validator_mapping() @property def entry_info(self): """ Provides the ``app_label``, ``specificity``, ``fields`` and a ``mapping`` between validator type's name and the validator itself:: # If "self.entry" is: { 'myapp.models.MyModel': { 'SIZE': 120, 'DIMENSIONS': (500, 500), } } # the return value from this method, would be: { 'app_label': 'my_app', 'specificity': 2, 'fields': [<ImageField1>, <ImageField2>, ...], 'mapping': { 'SIZE': <ValidationRuleSize.validator>, 'DIMENSIONS': <ValidationRuleDimensions.validator>, } } :return: dict of valuable information about this entry """ return { 'app_label': self.app_label, 'specificity': self.specificity, 'fields': self.fields, 'mapping': self.mapping, } class VimageConfig: def __init__(self, config): """ Initialize with the ``VIMAGE`` dict, defined in settings. :param dict config: the whole configuration VIMAGE dict setting """ self.config = config def __str__(self): return str(self.config) def __repr__(self): return f'{self.__class__.__name__}({self.config!r})' def vimage_entry_generator(self): """ For each key-value pair in the ``VIMAGE`` dict, yields a :class:`~vimage.core.base.VimageEntry` instance. :return: generator """ for key, value in self.config.items(): yield VimageEntry(key, value) def build_info(self): """ Given each ``VimageEntry`` object, build a dict with ``app_label`` as the key and ``VimageEntry.entry_info`` dict as the value. In other words, put entries that belong to the same app, inside a single *bucket*. Next step, is to sort, each bucket, by specificity. Return example:: { 'myapp': [ { 'app_label': 'myapp', 'fields': [<ImageField1>, <ImageField2>, ...], 'specificity': 2, 'mapping': { 'SIZE': <ValidationRuleSize.validator>, 'FORMAT': <ValidationRuleFormat.validator> }, }, { 'app_label': 'myapp', 'fields': [<ImageField1>, <ImageField2>, ...], 'specificity': 1, 'mapping': { 'SIZE': <ValidationRuleSize.validator> }, } ], 'myapp2': [ { 'app_label': 'myapp2', 'fields': [<ImageField1>], 'specificity': 3, 'mapping': { 'FORMAT': <ValidationRuleFormat.validator> } }, ], ... } :return: dict """ info = defaultdict(list) for vimage_entry in self.vimage_entry_generator(): entry_info = vimage_entry.entry_info info[entry_info['app_label']].append(entry_info) return info @staticmethod def sort_info(info): """ Sorts the list inside each bucket per specificity. 
Lower specificity comes first. :param dict info: dict of useful info about the VimageEntry :return: dict """ for value in info.values(): value.sort(key=lambda d: d['specificity']) return info @staticmethod def build_draft_registry(info): """ Builds the draft registry, dict, in the following pattern:: { <field1>: {'validator_name': <validator>, ...} <field2>: {'validator_name': <validator>, ...} } Think of the ``draft_registry`` as a pre-build of the ``self.registry``. Because each "info bucket" is sorted by specificity, the first "info" element in each "bucket" will always be a subset of the next one. If all elements inside the bucket have the same specificity then they will refer to different fields, thus, different keys inside ``self.draft_registry``. If a "bucket list" has three different elements with 1, 2, 3 specificity values, respectively, then the specific ``ImageField`` of the last element (specificity == 3) will override any given validators from the previous elements (specificity in [1, 2]). :param dict info: dict of useful info about the VimageEntry :return: dict """ draft_registry = {} # info is a DefaultDict <str>: <list> for values in info.values(): # values == list of one or more dicts (see "_build_info" docstring) for d in values: # each d is a dict for field in d['fields']: mapping = d['mapping'] if field not in draft_registry: # New field for validation. Add it. # Extra caution here due to reference by name. # A new dict (with new id) must be added to each field. draft_registry[field] = { k: v for k, v in mapping.items() } else: # update/insert validator to existing field for k, v in mapping.items(): draft_registry[field][k] = v return draft_registry @staticmethod def build_registry(draft_registry): """ Given the ``draft_registry`` dict, construct the main registry which is the basis of adding the validators into each ``ImageField``. :param dict draft_registry: a helper dict to help build the registry :return: dict """ registry = {} for field, mapping in draft_registry.items(): registry[field] = list(mapping.values()) return registry def add_validators(self): """ The entry point where the ``draft_registry`` and the ``registry`` are build and, finally, the validators are added to the corresponding ``ImageField`` fields. :return: None """ info = self.build_info() sorted_info = self.sort_info(info) draft_registry = self.build_draft_registry(sorted_info) registry = self.build_registry(draft_registry) for field, validators in registry.items(): field.validators += validators
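The two copies above differ only in how replacement fields inside f-strings are quoted: the first reuses the outer quote (f'... "{', '.join(model_names)}" ...'), while the second switches to the other quote (f'... "{", ".join(model_names)}" ...'). Reusing the outer quote inside a replacement field is only legal on Python 3.12+ (PEP 701); earlier interpreters reject it with a SyntaxError at compile time. A small demo, which therefore parses only on 3.12+:

names = ['Product', 'Order']
# Portable before 3.12: the replacement field uses a different quote.
msg_compat = f'Available model names: "{", ".join(names)}".'
# Python 3.12+ (PEP 701): the outer quote may be reused inside the braces.
msg_312 = f'Available model names: "{', '.join(names)}".'
assert msg_compat == msg_312 == 'Available model names: "Product, Order".'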
#!/usr/bin/python3 import argparse import clickhouse_driver import itertools import functools import math import os import pprint import random import re import statistics import string import sys import time import traceback import xml.etree.ElementTree as et from scipy import stats def tsv_escape(s): return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','') parser = argparse.ArgumentParser(description='Run performance test.') # Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set. parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file') parser.add_argument('--host', nargs='*', default=['localhost'], help="Server hostname(s). Corresponds to '--port' options.") parser.add_argument('--port', nargs='*', default=[9000], help="Server port(s). Corresponds to '--host' options.") parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.') parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.') parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.') parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.') parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.') args = parser.parse_args() test_name = os.path.splitext(os.path.basename(args.file[0].name))[0] tree = et.parse(args.file[0]) root = tree.getroot() # Process query parameters subst_elems = root.findall('substitutions/substitution') available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... } for e in subst_elems: available_parameters[e.find('name').text] = [v.text for v in e.findall('values/value')] # Takes parallel lists of templates, substitutes them with all combos of # parameters. The set of parameters is determined based on the first list. # Note: keep the order of queries -- sometimes we have DROP IF EXISTS # followed by CREATE in create queries section, so the order matters. def substitute_parameters(query_templates, other_templates = []): query_results = [] other_results = [[]] * (len(other_templates)) for i, q in enumerate(query_templates): keys = set(n for _, n, _, _ in string.Formatter().parse(q) if n) values = [available_parameters[k] for k in keys] combos = itertools.product(*values) for c in combos: with_keys = dict(zip(keys, c)) query_results.append(q.format(**with_keys)) for j, t in enumerate(other_templates): other_results[j].append(t[i].format(**with_keys)) if len(other_templates): return query_results, other_results else: return query_results # Build a list of test queries, substituting parameters to query templates, # and reporting the queries marked as short. test_queries = [] is_short = [] for e in root.findall('query'): new_queries, [new_is_short] = substitute_parameters([e.text], [[e.attrib.get('short', '0')]]) test_queries += new_queries is_short += [eval(s) for s in new_is_short] assert(len(test_queries) == len(is_short)) # If we're only asked to print the queries, do that and exit if args.print_queries: for q in test_queries: print(q) exit(0) # Print short queries for i, s in enumerate(is_short): if s: print(f'short\t{i}') # If we're only asked to print the settings, do that and exit. These are settings # for clickhouse-benchmark, so we print them as command line arguments, e.g. # '--max_memory_usage=10000000'. 
if args.print_settings: for s in root.findall('settings/*'): print(f'--{s.tag}={s.text}') exit(0) # Skip long tests if not args.long: for tag in root.findall('.//tag'): if tag.text == 'long': print('skipped\tTest is tagged as long.') sys.exit(0) # Print report threshold for the test if it is set. if 'max_ignored_relative_change' in root.attrib: print(f'report-threshold\t{root.attrib['max_ignored_relative_change']}') # Open connections servers = [{'host': host, 'port': port} for (host, port) in zip(args.host, args.port)] all_connections = [clickhouse_driver.Client(**server) for server in servers] for s in servers: print('server\t{}\t{}'.format(s['host'], s['port'])) # Run drop queries, ignoring errors. Do this before all other activity, because # clickhouse_driver disconnects on error (this is not configurable), and the new # connection loses the changes in settings. drop_query_templates = [q.text for q in root.findall('drop_query')] drop_queries = substitute_parameters(drop_query_templates) for conn_index, c in enumerate(all_connections): for q in drop_queries: try: c.execute(q) print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') except: pass # Apply settings. # If there are errors, report them and continue -- maybe a new test uses a setting # that is not in master, but the queries can still run. If we have multiple # settings and one of them throws an exception, all previous settings for this # connection will be reset, because the driver reconnects on error (not # configurable). So the end result is uncertain, but hopefully we'll be able to # run at least some queries. settings = root.findall('settings/*') for conn_index, c in enumerate(all_connections): for s in settings: try: q = f"set {s.tag} = '{s.text}'" c.execute(q) print(f'set\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') except: print(traceback.format_exc(), file=sys.stderr) # Check tables that should exist. If they don't exist, just skip this test. tables = [e.text for e in root.findall('preconditions/table_exists')] for t in tables: for c in all_connections: try: res = c.execute("select 1 from {} limit 1".format(t)) except: exception_message = traceback.format_exception_only(*sys.exc_info()[:2])[-1] skipped_message = ' '.join(exception_message.split('\n')[:2]) print(f'skipped\t{tsv_escape(skipped_message)}') sys.exit(0) # Run create queries create_query_templates = [q.text for q in root.findall('create_query')] create_queries = substitute_parameters(create_query_templates) # Disallow temporary tables, because the clickhouse_driver reconnects on errors, # and temporary tables are destroyed. We want to be able to continue after some # errors. for q in create_queries: if re.search('create temporary table', q, flags=re.IGNORECASE): print(f"Temporary tables are not allowed in performance tests: '{q}'", file = sys.stderr) sys.exit(1) for conn_index, c in enumerate(all_connections): for q in create_queries: c.execute(q) print(f'create\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') # Run fill queries fill_query_templates = [q.text for q in root.findall('fill_query')] fill_queries = substitute_parameters(fill_query_templates) for conn_index, c in enumerate(all_connections): for q in fill_queries: c.execute(q) print(f'fill\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') # Run the queries in randomized order, but preserve their indexes as specified # in the test XML. To avoid using too much time, limit the number of queries # we run per test. 
queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries or len(test_queries))) # Run test queries. for query_index in queries_to_run: q = test_queries[query_index] query_prefix = f'{test_name}.query{query_index}' # We have some crazy long queries (about 100kB), so trim them to a sane # length. This means we can't use query text as an identifier and have to # use the test name + the test-wide query index. query_display_name = q if len(query_display_name) > 1000: query_display_name = f'{query_display_name[:1000]}...({query_index})' print(f'display-name\t{query_index}\t{tsv_escape(query_display_name)}') # Prewarm: run once on both servers. Helps to bring the data into memory, # precompile the queries, etc. # A query might not run on the old server if it uses a function added in the # new one. We want to run them on the new server only, so that the PR author # can ensure that the test works properly. Remember the errors we had on # each server. query_error_on_connection = [None] * len(all_connections); for conn_index, c in enumerate(all_connections): try: prewarm_id = f'{query_prefix}.prewarm0' # Will also detect too long queries during warmup stage res = c.execute(q, query_id = prewarm_id, settings = {'max_execution_time': 10}) print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}') except KeyboardInterrupt: raise except: # FIXME the driver reconnects on error and we lose settings, so this # might lead to further errors or unexpected behavior. query_error_on_connection[conn_index] = traceback.format_exc(); continue # Report all errors that ocurred during prewarm and decide what to do next. # If prewarm fails for the query on all servers -- skip the query and # continue testing the next query. # If prewarm fails on one of the servers, run the query on the rest of them. no_errors = [] for i, e in enumerate(query_error_on_connection): if e: print(e, file = sys.stderr) else: no_errors.append(i) if len(no_errors) == 0: continue elif len(no_errors) < len(all_connections): print(f'partial\t{query_index}\t{no_errors}') this_query_connections = [all_connections[index] for index in no_errors] # Now, perform measured runs. # Track the time spent by the client to process this query, so that we can # notice the queries that take long to process on the client side, e.g. by # sending excessive data. start_seconds = time.perf_counter() server_seconds = 0 profile_seconds = 0 run = 0 # Arrays of run times for each connection. all_server_times = [] for conn_index, c in enumerate(this_query_connections): all_server_times.append([]) while True: run_id = f'{query_prefix}.run{run}' for conn_index, c in enumerate(this_query_connections): try: res = c.execute(q, query_id = run_id) except Exception as e: # Add query id to the exception to make debugging easier. e.args = (run_id, *e.args) e.message = run_id + ': ' + e.message raise elapsed = c.last_query.elapsed all_server_times[conn_index].append(elapsed) server_seconds += elapsed print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}') if elapsed > 10: # Stop processing pathologically slow queries, to avoid timing out # the entire test task. This shouldn't really happen, so we don't # need much handling for this case and can just exit. print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr) exit(2) # Be careful with the counter, after this line it's the next iteration # already. 
run += 1 # Try to run any query for at least the specified number of times, # before considering other stop conditions. if run < args.runs: continue # For very short queries we have a special mode where we run them for at # least some time. The recommended lower bound of run time for "normal" # queries is about 0.1 s, and we run them about 10 times, giving the # time per query per server of about one second. Use this value as a # reference for "short" queries. if is_short[query_index]: if server_seconds >= 2 * len(this_query_connections): break # Also limit the number of runs, so that we don't go crazy processing # the results -- 'eqmed.sql' is really suboptimal. if run >= 500: break else: if run >= args.runs: break client_seconds = time.perf_counter() - start_seconds print(f'client-time\t{query_index}\t{client_seconds}\t{server_seconds}') #print(all_server_times) #print(stats.ttest_ind(all_server_times[0], all_server_times[1], equal_var = False).pvalue) # Run additional profiling queries to collect profile data, but only if test times appeared to be different. # We have to do it after normal runs because otherwise it will affect test statistics too much if len(all_server_times) == 2 and stats.ttest_ind(all_server_times[0], all_server_times[1], equal_var = False).pvalue < 0.1: run = 0 while True: run_id = f'{query_prefix}.profile{run}' for conn_index, c in enumerate(this_query_connections): try: res = c.execute(q, query_id = run_id, settings = {'query_profiler_real_time_period_ns': 10000000}) print(f'profile\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}') except Exception as e: # Add query id to the exception to make debugging easier. e.args = (run_id, *e.args) e.message = run_id + ': ' + e.message raise elapsed = c.last_query.elapsed profile_seconds += elapsed run += 1 # Don't spend too much time for profile runs if run > args.runs or profile_seconds > 10: break # And don't bother with short queries # Run drop queries drop_queries = substitute_parameters(drop_query_templates) for conn_index, c in enumerate(all_connections): for q in drop_queries: c.execute(q) print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
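As a minimal standalone sketch of what substitute_parameters above does for a single template: collect the format fields, then render one query per combination of parameter values. The parameter names and values here are illustrative (the 'table' entry echoes the comment in the script), and the helper name expand is hypothetical.

import itertools
import string

available_parameters = {'table': ['hits_10m', 'hits_100m'], 'format': ['TSV', 'JSON']}

def expand(template: str) -> list:
    # Find the {placeholders}, then render one query per value combination.
    keys = sorted({n for _, n, _, _ in string.Formatter().parse(template) if n})
    combos = itertools.product(*(available_parameters[k] for k in keys))
    return [template.format(**dict(zip(keys, combo))) for combo in combos]

for q in expand('SELECT count() FROM {table} FORMAT {format}'):
    print(q)  # four queries, one per (format, table) combination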
#!/usr/bin/python3 import argparse import clickhouse_driver import itertools import functools import math import os import pprint import random import re import statistics import string import sys import time import traceback import xml.etree.ElementTree as et from scipy import stats def tsv_escape(s): return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','') parser = argparse.ArgumentParser(description='Run performance test.') # Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set. parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file') parser.add_argument('--host', nargs='*', default=['localhost'], help="Server hostname(s). Corresponds to '--port' options.") parser.add_argument('--port', nargs='*', default=[9000], help="Server port(s). Corresponds to '--host' options.") parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.') parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.') parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.') parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.') parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.') args = parser.parse_args() test_name = os.path.splitext(os.path.basename(args.file[0].name))[0] tree = et.parse(args.file[0]) root = tree.getroot() # Process query parameters subst_elems = root.findall('substitutions/substitution') available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... } for e in subst_elems: available_parameters[e.find('name').text] = [v.text for v in e.findall('values/value')] # Takes parallel lists of templates, substitutes them with all combos of # parameters. The set of parameters is determined based on the first list. # Note: keep the order of queries -- sometimes we have DROP IF EXISTS # followed by CREATE in create queries section, so the order matters. def substitute_parameters(query_templates, other_templates = []): query_results = [] other_results = [[]] * (len(other_templates)) for i, q in enumerate(query_templates): keys = set(n for _, n, _, _ in string.Formatter().parse(q) if n) values = [available_parameters[k] for k in keys] combos = itertools.product(*values) for c in combos: with_keys = dict(zip(keys, c)) query_results.append(q.format(**with_keys)) for j, t in enumerate(other_templates): other_results[j].append(t[i].format(**with_keys)) if len(other_templates): return query_results, other_results else: return query_results # Build a list of test queries, substituting parameters to query templates, # and reporting the queries marked as short. test_queries = [] is_short = [] for e in root.findall('query'): new_queries, [new_is_short] = substitute_parameters([e.text], [[e.attrib.get('short', '0')]]) test_queries += new_queries is_short += [eval(s) for s in new_is_short] assert(len(test_queries) == len(is_short)) # If we're only asked to print the queries, do that and exit if args.print_queries: for q in test_queries: print(q) exit(0) # Print short queries for i, s in enumerate(is_short): if s: print(f'short\t{i}') # If we're only asked to print the settings, do that and exit. These are settings # for clickhouse-benchmark, so we print them as command line arguments, e.g. # '--max_memory_usage=10000000'. 
if args.print_settings: for s in root.findall('settings/*'): print(f'--{s.tag}={s.text}') exit(0) # Skip long tests if not args.long: for tag in root.findall('.//tag'): if tag.text == 'long': print('skipped\tTest is tagged as long.') sys.exit(0) # Print report threshold for the test if it is set. if 'max_ignored_relative_change' in root.attrib: print(f'report-threshold\t{root.attrib["max_ignored_relative_change"]}') # Open connections servers = [{'host': host, 'port': port} for (host, port) in zip(args.host, args.port)] all_connections = [clickhouse_driver.Client(**server) for server in servers] for s in servers: print('server\t{}\t{}'.format(s['host'], s['port'])) # Run drop queries, ignoring errors. Do this before all other activity, because # clickhouse_driver disconnects on error (this is not configurable), and the new # connection loses the changes in settings. drop_query_templates = [q.text for q in root.findall('drop_query')] drop_queries = substitute_parameters(drop_query_templates) for conn_index, c in enumerate(all_connections): for q in drop_queries: try: c.execute(q) print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') except: pass # Apply settings. # If there are errors, report them and continue -- maybe a new test uses a setting # that is not in master, but the queries can still run. If we have multiple # settings and one of them throws an exception, all previous settings for this # connection will be reset, because the driver reconnects on error (not # configurable). So the end result is uncertain, but hopefully we'll be able to # run at least some queries. settings = root.findall('settings/*') for conn_index, c in enumerate(all_connections): for s in settings: try: q = f"set {s.tag} = '{s.text}'" c.execute(q) print(f'set\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') except: print(traceback.format_exc(), file=sys.stderr) # Check tables that should exist. If they don't exist, just skip this test. tables = [e.text for e in root.findall('preconditions/table_exists')] for t in tables: for c in all_connections: try: res = c.execute("select 1 from {} limit 1".format(t)) except: exception_message = traceback.format_exception_only(*sys.exc_info()[:2])[-1] skipped_message = ' '.join(exception_message.split('\n')[:2]) print(f'skipped\t{tsv_escape(skipped_message)}') sys.exit(0) # Run create queries create_query_templates = [q.text for q in root.findall('create_query')] create_queries = substitute_parameters(create_query_templates) # Disallow temporary tables, because the clickhouse_driver reconnects on errors, # and temporary tables are destroyed. We want to be able to continue after some # errors. for q in create_queries: if re.search('create temporary table', q, flags=re.IGNORECASE): print(f"Temporary tables are not allowed in performance tests: '{q}'", file = sys.stderr) sys.exit(1) for conn_index, c in enumerate(all_connections): for q in create_queries: c.execute(q) print(f'create\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') # Run fill queries fill_query_templates = [q.text for q in root.findall('fill_query')] fill_queries = substitute_parameters(fill_query_templates) for conn_index, c in enumerate(all_connections): for q in fill_queries: c.execute(q) print(f'fill\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') # Run the queries in randomized order, but preserve their indexes as specified # in the test XML. To avoid using too much time, limit the number of queries # we run per test. 
queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries or len(test_queries))) # Run test queries. for query_index in queries_to_run: q = test_queries[query_index] query_prefix = f'{test_name}.query{query_index}' # We have some crazy long queries (about 100kB), so trim them to a sane # length. This means we can't use query text as an identifier and have to # use the test name + the test-wide query index. query_display_name = q if len(query_display_name) > 1000: query_display_name = f'{query_display_name[:1000]}...({query_index})' print(f'display-name\t{query_index}\t{tsv_escape(query_display_name)}') # Prewarm: run once on both servers. Helps to bring the data into memory, # precompile the queries, etc. # A query might not run on the old server if it uses a function added in the # new one. We want to run them on the new server only, so that the PR author # can ensure that the test works properly. Remember the errors we had on # each server. query_error_on_connection = [None] * len(all_connections); for conn_index, c in enumerate(all_connections): try: prewarm_id = f'{query_prefix}.prewarm0' # Will also detect too long queries during warmup stage res = c.execute(q, query_id = prewarm_id, settings = {'max_execution_time': 10}) print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}') except KeyboardInterrupt: raise except: # FIXME the driver reconnects on error and we lose settings, so this # might lead to further errors or unexpected behavior. query_error_on_connection[conn_index] = traceback.format_exc(); continue # Report all errors that ocurred during prewarm and decide what to do next. # If prewarm fails for the query on all servers -- skip the query and # continue testing the next query. # If prewarm fails on one of the servers, run the query on the rest of them. no_errors = [] for i, e in enumerate(query_error_on_connection): if e: print(e, file = sys.stderr) else: no_errors.append(i) if len(no_errors) == 0: continue elif len(no_errors) < len(all_connections): print(f'partial\t{query_index}\t{no_errors}') this_query_connections = [all_connections[index] for index in no_errors] # Now, perform measured runs. # Track the time spent by the client to process this query, so that we can # notice the queries that take long to process on the client side, e.g. by # sending excessive data. start_seconds = time.perf_counter() server_seconds = 0 profile_seconds = 0 run = 0 # Arrays of run times for each connection. all_server_times = [] for conn_index, c in enumerate(this_query_connections): all_server_times.append([]) while True: run_id = f'{query_prefix}.run{run}' for conn_index, c in enumerate(this_query_connections): try: res = c.execute(q, query_id = run_id) except Exception as e: # Add query id to the exception to make debugging easier. e.args = (run_id, *e.args) e.message = run_id + ': ' + e.message raise elapsed = c.last_query.elapsed all_server_times[conn_index].append(elapsed) server_seconds += elapsed print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}') if elapsed > 10: # Stop processing pathologically slow queries, to avoid timing out # the entire test task. This shouldn't really happen, so we don't # need much handling for this case and can just exit. print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr) exit(2) # Be careful with the counter, after this line it's the next iteration # already. 
run += 1 # Try to run any query for at least the specified number of times, # before considering other stop conditions. if run < args.runs: continue # For very short queries we have a special mode where we run them for at # least some time. The recommended lower bound of run time for "normal" # queries is about 0.1 s, and we run them about 10 times, giving the # time per query per server of about one second. Use this value as a # reference for "short" queries. if is_short[query_index]: if server_seconds >= 2 * len(this_query_connections): break # Also limit the number of runs, so that we don't go crazy processing # the results -- 'eqmed.sql' is really suboptimal. if run >= 500: break else: if run >= args.runs: break client_seconds = time.perf_counter() - start_seconds print(f'client-time\t{query_index}\t{client_seconds}\t{server_seconds}') #print(all_server_times) #print(stats.ttest_ind(all_server_times[0], all_server_times[1], equal_var = False).pvalue) # Run additional profiling queries to collect profile data, but only if test times appeared to be different. # We have to do it after normal runs because otherwise it will affect test statistics too much if len(all_server_times) == 2 and stats.ttest_ind(all_server_times[0], all_server_times[1], equal_var = False).pvalue < 0.1: run = 0 while True: run_id = f'{query_prefix}.profile{run}' for conn_index, c in enumerate(this_query_connections): try: res = c.execute(q, query_id = run_id, settings = {'query_profiler_real_time_period_ns': 10000000}) print(f'profile\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}') except Exception as e: # Add query id to the exception to make debugging easier. e.args = (run_id, *e.args) e.message = run_id + ': ' + e.message raise elapsed = c.last_query.elapsed profile_seconds += elapsed run += 1 # Don't spend too much time for profile runs if run > args.runs or profile_seconds > 10: break # And don't bother with short queries # Run drop queries drop_queries = substitute_parameters(drop_query_templates) for conn_index, c in enumerate(all_connections): for q in drop_queries: c.execute(q) print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
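The profile-run gate near the end of both copies rests on Welch's t-test over the per-server timing arrays. A self-contained sketch of that check with illustrative timings; equal_var=False and the 0.1 threshold match the script:

from scipy import stats

old_server = [0.101, 0.103, 0.099, 0.102, 0.100]  # illustrative timings, seconds
new_server = [0.121, 0.124, 0.119, 0.122, 0.120]

pvalue = stats.ttest_ind(old_server, new_server, equal_var=False).pvalue
if pvalue < 0.1:
    print(f'timings differ (p-value {pvalue:.3g}); profile runs would be collected')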
import copy
import os
import tempfile
from functools import wraps
from itertools import groupby
from typing import List, Optional, Tuple, TypeVar, Union

import numpy as np
import pyarrow as pa
import pyarrow.compute as pc

from . import config
from .utils.logging import get_logger


logger = get_logger(__name__)


def inject_arrow_table_documentation(arrow_table_method):
    def wrapper(method):
        out = wraps(arrow_table_method)(method)
        out.__doc__ = out.__doc__.replace("pyarrow.Table", "Table")
        return out

    return wrapper


def _in_memory_arrow_table_from_file(filename: str) -> pa.Table:
    in_memory_stream = pa.input_stream(filename)
    opened_stream = pa.ipc.open_stream(in_memory_stream)
    pa_table = opened_stream.read_all()
    return pa_table


def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table:
    stream = pa.BufferReader(buffer)
    opened_stream = pa.ipc.open_stream(stream)
    table = opened_stream.read_all()
    return table


def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table:
    memory_mapped_stream = pa.memory_map(filename)
    opened_stream = pa.ipc.open_stream(memory_mapped_stream)
    pa_table = opened_stream.read_all()
    return pa_table


def _write_table_to_file(table: pa.Table, filename: str) -> int:
    with open(filename, "wb") as sink:
        writer = pa.RecordBatchStreamWriter(sink=sink, schema=table.schema)
        batches: List[pa.RecordBatch] = table.to_batches()
        for batch in batches:
            writer.write_batch(batch)
        writer.close()
        return sum(batch.nbytes for batch in batches)


def _deepcopy(x, memo: dict):
    """deepcopy a regular class instance"""
    cls = x.__class__
    result = cls.__new__(cls)
    memo[id(x)] = result
    for k, v in x.__dict__.items():
        setattr(result, k, copy.deepcopy(v, memo))
    return result


def _interpolation_search(arr: List[int], x: int) -> int:
    """
    Return the position i of a sorted array so that arr[i] <= x < arr[i+1]

    Args:
        arr (:obj:`List[int]`): non-empty sorted list of integers
        x (:obj:`int`): query

    Returns:
        `int`: the position i so that arr[i] <= x < arr[i+1]

    Raises:
        `IndexError`: if the array is empty or if the query is outside the array values
    """
    i, j = 0, len(arr) - 1
    while i < j and arr[i] <= x < arr[j]:
        k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
        if arr[k] <= x < arr[k + 1]:
            return k
        elif arr[k] < x:
            i, j = k + 1, j
        else:
            i, j = i, k
    raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else "none"}.")


class IndexedTableMixin:
    def __init__(self, table: pa.Table):
        self._schema = table.schema
        self._batches = [recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0]
        self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64)

    def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table:
        """
        Create a pa.Table by gathering the records at the specified indices. Should be faster
        than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices)
        since NumPy can compute the binary searches in parallel, in highly optimized C
        """
        assert len(indices), "Indices must be non-empty"
        batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1
        return pa.Table.from_batches(
            [
                self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1)
                for batch_idx, i in zip(batch_indices, indices)
            ],
            schema=self._schema,
        )

    def fast_slice(self, offset=0, length=None) -> pa.Table:
        """
        Slice the Table using interpolation search.
        The behavior is the same as :obj:`pyarrow.Table.slice` but it's significantly faster.

        Interpolation search is used to find the start and end indexes of the batches we want to keep.
        The batches to keep are then concatenated to form the sliced Table.
        """
        if offset < 0:
            raise IndexError("Offset must be non-negative")
        elif offset >= self._offsets[-1] or (length is not None and length <= 0):
            return pa.Table.from_batches([], schema=self._schema)
        i = _interpolation_search(self._offsets, offset)
        if length is None or length + offset >= self._offsets[-1]:
            batches = self._batches[i:]
            batches[0] = batches[0].slice(offset - self._offsets[i])
        else:
            j = _interpolation_search(self._offsets, offset + length - 1)
            batches = self._batches[i : j + 1]
            batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j])
            batches[0] = batches[0].slice(offset - self._offsets[i])
        return pa.Table.from_batches(batches, schema=self._schema)


class Table(IndexedTableMixin):
    """
    Wraps a pyarrow Table by using composition.
    This is the base class for InMemoryTable, MemoryMappedTable and ConcatenationTable.

    It implements all the basic attributes/methods of the pyarrow Table class except
    the Table transforms: slice, filter, flatten, combine_chunks, cast, add_column,
    append_column, remove_column, set_column, rename_columns and drop.

    The implementation of these methods differs for the subclasses.
    """

    def __init__(self, table: pa.Table):
        super().__init__(table)
        self.table = table

    def __deepcopy__(self, memo: dict):
        # arrow tables are immutable, so there's no need to copy self.table
        # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason
        # by adding it to the memo, self.table won't be copied
        memo[id(self.table)] = self.table
        # same for the recordbatches used by the index
        memo[id(self._batches)] = list(self._batches)
        return _deepcopy(self, memo)

    def __getstate__(self):
        # We can't pickle objects that are bigger than 4GiB, or it causes OverflowError
        # So we write the table on disk instead
        if self.table.nbytes >= config.MAX_TABLE_NBYTES_FOR_PICKLING:
            table = self.table
            with tempfile.NamedTemporaryFile("wb", delete=False, suffix=".arrow") as tmp_file:
                filename = tmp_file.name
                logger.debug(
                    f"Attempting to pickle a table bigger than 4GiB. Writing it on the disk instead at {filename}"
                )
                _write_table_to_file(table=table, filename=filename)
                return {"path": filename}
        else:
            return {"table": self.table}

    def __setstate__(self, state):
        if "path" in state:
            filename = state["path"]
            logger.debug(f"Unpickling a big table from the disk at {filename}")
            table = _in_memory_arrow_table_from_file(filename)
            logger.debug(f"Removing temporary table file at {filename}")
            os.remove(filename)
        else:
            table = state["table"]
        Table.__init__(self, table)

    @inject_arrow_table_documentation(pa.Table.validate)
    def validate(self, *args, **kwargs):
        return self.table.validate(*args, **kwargs)

    @inject_arrow_table_documentation(pa.Table.equals)
    def equals(self, *args, **kwargs):
        args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args)
        kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs.items()}
        return self.table.equals(*args, **kwargs)

    @inject_arrow_table_documentation(pa.Table.to_batches)
    def to_batches(self, *args, **kwargs):
        return self.table.to_batches(*args, **kwargs)

    @inject_arrow_table_documentation(pa.Table.to_pydict)
    def to_pydict(self, *args, **kwargs):
        return self.table.to_pydict(*args, **kwargs)

    @inject_arrow_table_documentation(pa.Table.to_pandas)
    def to_pandas(self, *args, **kwargs):
        return self.table.to_pandas(*args, **kwargs)

    def to_string(self, *args, **kwargs):
        return self.table.to_string(*args, **kwargs)

    @inject_arrow_table_documentation(pa.Table.field)
    def field(self, *args, **kwargs):
        return self.table.field(*args, **kwargs)

    @inject_arrow_table_documentation(pa.Table.column)
    def column(self, *args, **kwargs):
        return self.table.column(*args, **kwargs)

    @inject_arrow_table_documentation(pa.Table.itercolumns)
    def itercolumns(self, *args, **kwargs):
        return self.table.itercolumns(*args, **kwargs)

    @property
    def schema(self):
        return self.table.schema

    @property
    def columns(self):
        return self.table.columns

    @property
    def num_columns(self):
        return self.table.num_columns

    @property
    def num_rows(self):
        return self.table.num_rows

    @property
    def shape(self):
        return self.table.shape

    @property
    def nbytes(self):
        return self.table.nbytes

    @property
    def column_names(self):
        return self.table.column_names

    def __eq__(self, other):
        return self.equals(other)

    def __getitem__(self, i):
        return self.table[i]

    def __len__(self):
        return len(self.table)

    def __repr__(self):
        return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__)

    def __str__(self):
        return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__)

    @inject_arrow_table_documentation(pa.Table.slice)
    def slice(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.filter)
    def filter(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.flatten)
    def flatten(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.combine_chunks)
    def combine_chunks(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.cast)
    def cast(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.replace_schema_metadata)
    def replace_schema_metadata(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.add_column)
    def add_column(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.append_column)
    def append_column(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.remove_column)
    def remove_column(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.set_column)
    def set_column(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.rename_columns)
    def rename_columns(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.drop)
    def drop(self, *args, **kwargs):
        raise NotImplementedError()


class TableBlock(Table):
    """
    TableBlock is the allowed class inside a ConcatenationTable.
    Only MemoryMappedTable and InMemoryTable are TableBlock.
    This is because we don't want a ConcatenationTable made out of other ConcatenationTables.
    """

    pass


class InMemoryTable(TableBlock):
    """
    The table is said to be in-memory when it is loaded into the user's RAM.

    Pickling it does copy all the data using memory.
    Its implementation is simple and uses the underlying pyarrow Table methods directly.

    This is different from the MemoryMapped table, for which pickling doesn't copy all the
    data in memory. For a MemoryMapped, unpickling instead reloads the table from the disk.

    InMemoryTable must be used when data fit in memory, while MemoryMapped are reserved for
    data bigger than memory or when you want the memory footprint of your application to
    stay low.
    """

    @classmethod
    def from_file(cls, filename: str):
        table = _in_memory_arrow_table_from_file(filename)
        return cls(table)

    @classmethod
    def from_buffer(cls, buffer: pa.Buffer):
        table = _in_memory_arrow_table_from_buffer(buffer)
        return cls(table)

    @classmethod
    @inject_arrow_table_documentation(pa.Table.from_pandas)
    def from_pandas(cls, *args, **kwargs):
        return cls(pa.Table.from_pandas(*args, **kwargs))

    @classmethod
    @inject_arrow_table_documentation(pa.Table.from_arrays)
    def from_arrays(cls, *args, **kwargs):
        return cls(pa.Table.from_arrays(*args, **kwargs))

    @classmethod
    @inject_arrow_table_documentation(pa.Table.from_pydict)
    def from_pydict(cls, *args, **kwargs):
        return cls(pa.Table.from_pydict(*args, **kwargs))

    @classmethod
    @inject_arrow_table_documentation(pa.Table.from_batches)
    def from_batches(cls, *args, **kwargs):
        return cls(pa.Table.from_batches(*args, **kwargs))

    @inject_arrow_table_documentation(pa.Table.slice)
    def slice(self, offset=0, length=None):
        # Use fast slicing here
        return InMemoryTable(self.fast_slice(offset=offset, length=length))

    @inject_arrow_table_documentation(pa.Table.filter)
    def filter(self, *args, **kwargs):
        return InMemoryTable(self.table.filter(*args, **kwargs))

    @inject_arrow_table_documentation(pa.Table.flatten)
    def flatten(self, *args, **kwargs):
        return InMemoryTable(self.table.flatten(*args, **kwargs))

    @inject_arrow_table_documentation(pa.Table.combine_chunks)
    def combine_chunks(self, *args, **kwargs):
        return InMemoryTable(self.table.combine_chunks(*args, **kwargs))

    @inject_arrow_table_documentation(pa.Table.cast)
    def cast(self, *args, **kwargs):
        return InMemoryTable(self.table.cast(*args, **kwargs))

    @inject_arrow_table_documentation(pa.Table.replace_schema_metadata)
    def replace_schema_metadata(self, *args, **kwargs):
        return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs))

    @inject_arrow_table_documentation(pa.Table.add_column)
    def add_column(self, *args, **kwargs):
        return InMemoryTable(self.table.add_column(*args, **kwargs))

    @inject_arrow_table_documentation(pa.Table.append_column)
    def append_column(self, *args, **kwargs):
        return InMemoryTable(self.table.append_column(*args, **kwargs))

    @inject_arrow_table_documentation(pa.Table.remove_column)
    def remove_column(self, *args, **kwargs):
        return InMemoryTable(self.table.remove_column(*args, **kwargs))

    @inject_arrow_table_documentation(pa.Table.set_column)
    def set_column(self, *args, **kwargs):
        return InMemoryTable(self.table.set_column(*args, **kwargs))

    @inject_arrow_table_documentation(pa.Table.rename_columns)
    def rename_columns(self, *args, **kwargs):
        return InMemoryTable(self.table.rename_columns(*args, **kwargs))

    @inject_arrow_table_documentation(pa.Table.drop)
    def drop(self, *args, **kwargs):
        return InMemoryTable(self.table.drop(*args, **kwargs))


# The MemoryMappedTable needs replays to properly reload tables from the disk
Replay = Tuple[str, tuple, dict]


class MemoryMappedTable(TableBlock):
    """
    The table is said to be memory mapped when it doesn't use the user's RAM but loads the data
    from the disk instead.

    Pickling it doesn't copy the data into memory.
    Instead, only the path to the memory mapped arrow file is pickled, as well as the list
    of transforms to "replay" when reloading the table from the disk.

    Its implementation requires storing a history of all the transforms that were applied
    to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table
    from the disk.

    This is different from the InMemoryTable, for which pickling does copy all the data
    in memory.

    InMemoryTable must be used when data fit in memory, while MemoryMapped are reserved for
    data bigger than memory or when you want the memory footprint of your application to
    stay low.
    """

    def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None):
        super().__init__(table)
        self.path = path
        self.replays: List[Replay] = replays if replays is not None else []

    @classmethod
    def from_file(cls, filename: str, replays=None):
        table = _memory_mapped_arrow_table_from_file(filename)
        table = cls._apply_replays(table, replays)
        return cls(table, filename, replays)

    def __getstate__(self):
        return {"path": self.path, "replays": self.replays}

    def __setstate__(self, state):
        path = state["path"]
        replays = state["replays"]
        table = _memory_mapped_arrow_table_from_file(path)
        table = self._apply_replays(table, replays)
        MemoryMappedTable.__init__(self, table, path=path, replays=replays)

    @staticmethod
    def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table:
        if replays is not None:
            for name, args, kwargs in replays:
                table = getattr(table, name)(*args, **kwargs)
        return table

    def _append_replay(self, replay: Replay) -> List[Replay]:
        replays = copy.deepcopy(self.replays)
        replays.append(replay)
        return replays

    @inject_arrow_table_documentation(pa.Table.slice)
    def slice(self, offset=0, length=None):
        replay = ("slice", (offset, length), {})
        replays = self._append_replay(replay)
        # Use fast slicing here
        return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays)

    @inject_arrow_table_documentation(pa.Table.filter)
    def filter(self, *args, **kwargs):
        replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays)

    @inject_arrow_table_documentation(pa.Table.flatten)
    def flatten(self, *args, **kwargs):
        replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.flatten(*args, **kwargs), self.path, replays)

    @inject_arrow_table_documentation(pa.Table.combine_chunks)
    def combine_chunks(self, *args, **kwargs):
        replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays)

    @inject_arrow_table_documentation(pa.Table.cast)
    def cast(self, *args, **kwargs):
        replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.cast(*args, **kwargs), self.path, replays)

    @inject_arrow_table_documentation(pa.Table.replace_schema_metadata)
    def replace_schema_metadata(self, *args, **kwargs):
        replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays)

    @inject_arrow_table_documentation(pa.Table.add_column)
    def add_column(self, *args, **kwargs):
        replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays)

    @inject_arrow_table_documentation(pa.Table.append_column)
    def append_column(self, *args, **kwargs):
        replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays)

    @inject_arrow_table_documentation(pa.Table.remove_column)
    def remove_column(self, *args, **kwargs):
        replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays)

    @inject_arrow_table_documentation(pa.Table.set_column)
    def set_column(self, *args, **kwargs):
        replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays)

    @inject_arrow_table_documentation(pa.Table.rename_columns)
    def rename_columns(self, *args, **kwargs):
        replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays)

    @inject_arrow_table_documentation(pa.Table.drop)
    def drop(self, *args, **kwargs):
        replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs))
        replays = self._append_replay(replay)
        return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays)


# A ConcatenationTable is the concatenation of several tables.
# The ``blocks`` attribute stores a list of lists of blocks.
# The first axis concatenates the tables along the axis 0 (it appends rows),
# while the second axis concatenates tables along the axis 1 (it appends columns).
TableBlockContainer = TypeVar("TableBlockContainer", TableBlock, List[TableBlock], List[List[TableBlock]])


class ConcatenationTable(Table):
    """
    The table comes from the concatenation of several tables called blocks.
    It enables concatenation on both axis 0 (append rows) and axis 1 (append columns).

    The underlying tables are called "blocks" and can be either InMemoryTable or
    MemoryMappedTable objects. This allows combining tables that come from memory or that
    are memory mapped. When a ConcatenationTable is pickled, then each block is pickled:
    - the InMemoryTable objects are pickled by copying all the data in memory;
    - the MemoryMappedTable objects are pickled without copying the data into memory.
    Instead, only the path to the memory mapped arrow file is pickled, as well as the list
    of transforms to "replay" when reloading the table from the disk.

    Its implementation requires storing each block separately.
    The ``blocks`` attribute stores a list of lists of blocks.
    The first axis concatenates the tables along the axis 0 (it appends rows),
    while the second axis concatenates tables along the axis 1 (it appends columns).

    You can access the fully combined table by accessing the ConcatenationTable.table
    attribute, and the blocks by accessing the ConcatenationTable.blocks attribute.
    """

    def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]):
        super().__init__(table)
        self.blocks = blocks
        # Check that all the blocks have the right type.
        # Only InMemoryTable and MemoryMappedTable are allowed.
        for subtables in blocks:
            for subtable in subtables:
                if not isinstance(subtable, TableBlock):
                    raise TypeError(
                        "The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects"
                        f", but got {subtable}."
                    )

    def __getstate__(self):
        return {"blocks": self.blocks}

    def __setstate__(self, state):
        blocks = state["blocks"]
        table = self._concat_blocks_horizontally_and_vertically(blocks)
        ConcatenationTable.__init__(self, table, blocks=blocks)

    @staticmethod
    def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table:
        pa_tables = [table.table if hasattr(table, "table") else table for table in blocks]
        if axis == 0:
            # Align schemas: re-order the columns to make the schemas match before concatenating over rows
            schema = pa_tables[0].schema
            pa_tables = [
                table
                if table.schema == schema
                else pa.Table.from_arrays([table[name] for name in schema.names], names=schema.names)
                for table in pa_tables
            ]
            return pa.concat_tables(pa_tables)
        elif axis == 1:
            for i, table in enumerate(pa_tables):
                if i == 0:
                    pa_table = table
                else:
                    for name, col in zip(table.column_names, table.columns):
                        pa_table = pa_table.append_column(name, col)
            return pa_table
        else:
            raise ValueError("'axis' must be either 0 or 1")

    @classmethod
    def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table:
        pa_tables_to_concat_vertically = []
        for i, tables in enumerate(blocks):
            if not tables:
                continue
            pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1)
            pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated)
        return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0)

    @classmethod
    def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer:
        if axis is not None:
            merged_blocks = []
            for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)):
                if is_in_memory:
                    block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]
                merged_blocks += list(block_group)
        else:  # both
            merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks]
            if all(len(row_block) == 1 for row_block in merged_blocks):
                merged_blocks = cls._merge_blocks(
                    [block for row_block in merged_blocks for block in row_block], axis=0
                )
        return merged_blocks

    @classmethod
    def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer:
        if isinstance(blocks, TableBlock):
            return blocks
        elif isinstance(blocks[0], TableBlock):
            return cls._merge_blocks(blocks, axis=0)
        else:
            return cls._merge_blocks(blocks)

    @classmethod
    def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable":
        blocks = cls._consolidate_blocks(blocks)
        if isinstance(blocks, TableBlock):
            table = blocks
            return cls(table.table, [[table]])
        elif isinstance(blocks[0], TableBlock):
            table = cls._concat_blocks(blocks, axis=0)
            blocks = [[t] for t in blocks]
            return cls(table, blocks)
        else:
            table = cls._concat_blocks_horizontally_and_vertically(blocks)
            return cls(table, blocks)

    @classmethod
    def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable":
        """Create ConcatenationTable from list of tables.

        Args:
            tables (list of :class:`Table` or list of :obj:`pyarrow.Table`): List of tables.
            axis (``{0, 1}``, default ``0``, meaning over rows):
                Axis to concatenate over, where ``0`` means over rows (vertically) and ``1``
                means over columns (horizontally).

                .. versionadded:: 1.6.0
        """

        def to_blocks(table):
            if isinstance(table, pa.Table):
                return [[InMemoryTable(table)]]
            elif isinstance(table, ConcatenationTable):
                return copy.deepcopy(table.blocks)
            else:
                return [[table]]

        def _split_like(blocks_to_split, blocks_like):
            splits = []
            offset = 0
            for block_row in blocks_like:
                length = block_row[0].num_rows
                splits.append((offset, length))
                offset += length
            return [
                [block.slice(offset=split[0], length=split[1]) for block in blocks_to_split[0]]
                for split in splits
            ]

        def _extend_blocks(result, blocks: List[List[TableBlock]], axis: int = 0):
            if axis == 0:
                result.extend(blocks)
            elif axis == 1:
                if len(result) == 1 and len(blocks) > 1:
                    result = _split_like(result, blocks)  # Split result
                elif len(blocks) == 1 and len(result) > 1:
                    blocks = _split_like(blocks, result)  # Split blocks
                # TODO: This assumes each block_row has the same num_rows
                for i, row_blocks in enumerate(blocks):
                    result[i].extend(row_blocks)
            return result

        blocks = to_blocks(tables[0])
        for table in tables[1:]:
            table_blocks = to_blocks(table)
            blocks = _extend_blocks(blocks, table_blocks, axis=axis)
        return cls.from_blocks(blocks)

    @property
    def _slices(self):
        offset = 0
        for tables in self.blocks:
            length = len(tables[0])
            yield (offset, length)
            offset += length

    @inject_arrow_table_documentation(pa.Table.slice)
    def slice(self, offset=0, length=None):
        table = self.table.slice(offset, length=length)
        length = length if length is not None else self.num_rows - offset
        blocks = []
        for tables in self.blocks:
            n_rows = len(tables[0])
            if length == 0:
                break
            elif n_rows <= offset:
                offset = offset - n_rows
            elif n_rows <= offset + length:
                blocks.append([t.slice(offset) for t in tables])
                length, offset = length + offset - n_rows, 0
            else:
                blocks.append([t.slice(offset, length) for t in tables])
                length, offset = 0, 0
        return ConcatenationTable(table, blocks)

    @inject_arrow_table_documentation(pa.Table.filter)
    def filter(self, mask, *args, **kwargs):
        table = self.table.filter(mask, *args, **kwargs)
        blocks = []
        for (offset, length), tables in zip(self._slices, self.blocks):
            submask = mask.slice(offset, length)
            blocks.append([t.filter(submask, *args, **kwargs) for t in tables])
        return ConcatenationTable(table, blocks)

    @inject_arrow_table_documentation(pa.Table.flatten)
    def flatten(self, *args, **kwargs):
        table = self.table.flatten(*args, **kwargs)
        blocks = []
        for tables in self.blocks:
            blocks.append([t.flatten(*args, **kwargs) for t in tables])
        return ConcatenationTable(table, blocks)

    @inject_arrow_table_documentation(pa.Table.combine_chunks)
    def combine_chunks(self, *args, **kwargs):
        table = self.table.combine_chunks(*args, **kwargs)
        blocks = []
        for tables in self.blocks:
            blocks.append([t.combine_chunks(*args, **kwargs) for t in tables])
        return ConcatenationTable(table, blocks)

    @inject_arrow_table_documentation(pa.Table.cast)
    def cast(self, target_schema, *args, **kwargs):
        table = self.table.cast(target_schema, *args, **kwargs)
        blocks = []
        for subtables in self.blocks:
            new_tables = []
            fields = list(target_schema)
            for subtable in subtables:
                subfields = []
                for name in subtable.column_names:
                    subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name)))
                subschema = pa.schema(subfields)
                new_tables.append(subtable.cast(subschema, *args, **kwargs))
            blocks.append(new_tables)
        return ConcatenationTable(table, blocks)

    @inject_arrow_table_documentation(pa.Table.replace_schema_metadata)
    def replace_schema_metadata(self, *args, **kwargs):
        table = self.table.replace_schema_metadata(*args, **kwargs)
        blocks = []
        for tables in self.blocks:
            blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables])
        return ConcatenationTable(table, blocks)

    @inject_arrow_table_documentation(pa.Table.add_column)
    def add_column(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.append_column)
    def append_column(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.remove_column)
    def remove_column(self, i, *args, **kwargs):
        table = self.table.remove_column(i, *args, **kwargs)
        name = self.table.column_names[i]
        blocks = []
        for tables in self.blocks:
            blocks.append(
                [
                    t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t
                    for t in tables
                ]
            )
        return ConcatenationTable(table, blocks)

    @inject_arrow_table_documentation(pa.Table.set_column)
    def set_column(self, *args, **kwargs):
        raise NotImplementedError()

    @inject_arrow_table_documentation(pa.Table.rename_columns)
    def rename_columns(self, names, *args, **kwargs):
        table = self.table.rename_columns(names, *args, **kwargs)
        names = dict(zip(self.table.column_names, names))
        blocks = []
        for tables in self.blocks:
            blocks.append(
                [t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables]
            )
        return ConcatenationTable(table, blocks)

    @inject_arrow_table_documentation(pa.Table.drop)
    def drop(self, columns, *args, **kwargs):
        table = self.table.drop(columns)
        blocks = []
        for tables in self.blocks:
            blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
        return ConcatenationTable(table, blocks)


def concat_tables(tables: List[Table], axis: int = 0) -> Table:
    """
    Concatenate tables.

    Args:
        tables (list of :class:`Table`): List of tables to be concatenated.
        axis (``{0, 1}``, default ``0``, meaning over rows):
            Axis to concatenate over, where ``0`` means over rows (vertically) and ``1``
            means over columns (horizontally).

            .. versionadded:: 1.6.0

    Returns:
        :obj:`datasets.table.Table` that is the concatenated table:
        If the number of input tables is > 1, then the returned table is a :obj:`datasets.table.ConcatenationTable`.
        Otherwise, if there's only one table, it is returned as is.
    """
    tables = list(tables)
    if len(tables) == 1:
        return tables[0]
    return ConcatenationTable.from_tables(tables, axis=axis)


def list_table_cache_files(table: Table) -> List[str]:
    """
    Get the cache files that are loaded by the table.
    Cache files are used when parts of the table come from the disk via memory mapping.

    Returns:
        :obj:`List[str]`: a list of paths to the cache files loaded by the table
    """
    if isinstance(table, ConcatenationTable):
        cache_files = []
        for subtables in table.blocks:
            for subtable in subtables:
                cache_files += list_table_cache_files(subtable)
        return cache_files
    elif isinstance(table, MemoryMappedTable):
        return [table.path]
    else:
        return []


def cast_with_sliced_list_support(pa_table: pa.Table, schema: pa.Schema) -> pa.Table:
    """Same as pyarrow.Table.cast, except it works for sliced list arrays"""

    def wrap_for_chunked_arrays(func):
        """Apply the function on each chunk of a pyarrow.ChunkedArray, or on the array directly"""

        def wrapper(array):
            if isinstance(array, pa.ChunkedArray):
                return pa.chunked_array([func(chunk) for chunk in array.chunks])
            else:
                return func(array)

        return wrapper

    @wrap_for_chunked_arrays
    def reset_sliced_list_offset(array: pa.ListArray):
        """Return the same pyarrow.ListArray but with array.offset == 0 for compatibility with cast"""
        if array.offset == 0:
            return array
        elif len(array) == 0:
            return array.values.slice(0, 0)
        else:
            values_offset = array.offsets[0]  # the relevant values start at this index
            new_values = array.values.slice(values_offset.as_py())  # get the values to start at the right position
            new_offsets = pc.subtract(array.offsets, values_offset)  # update the offsets accordingly
            return pa.ListArray.from_arrays(new_offsets, new_values)

    arrays = [reset_sliced_list_offset(array) if isinstance(array.type, pa.ListType) else array for array in pa_table]
    return pa.Table.from_arrays(arrays, schema=schema)
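A short usage sketch of the wrappers defined above, assuming the module is importable as datasets.table (the toy data is made up):

# Usage sketch for the table wrappers above; the data and import path are assumptions.
from datasets.table import InMemoryTable, concat_tables

t1 = InMemoryTable.from_pydict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
t2 = InMemoryTable.from_pydict({"id": [4, 5], "text": ["d", "e"]})

big = concat_tables([t1, t2])  # returns a ConcatenationTable
assert big.num_rows == 5
# Both inputs are in-memory, so consolidation merges them into a single block.
assert len(big.blocks) == 1

# slice() delegates to fast_slice, which locates the record batches to keep by
# interpolation search over the cumulative batch offsets instead of scanning.
middle = big.slice(1, 3)
print(middle.to_pydict())  # {'id': [2, 3, 4], 'text': ['b', 'c', 'd']}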
import copy import os import tempfile from functools import wraps from itertools import groupby from typing import List, Optional, Tuple, TypeVar, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc from . import config from .utils.logging import get_logger logger = get_logger(__name__) def inject_arrow_table_documentation(arrow_table_method): def wrapper(method): out = wraps(arrow_table_method)(method) out.__doc__ = out.__doc__.replace("pyarrow.Table", "Table") return out return wrapper def _in_memory_arrow_table_from_file(filename: str) -> pa.Table: in_memory_stream = pa.input_stream(filename) opened_stream = pa.ipc.open_stream(in_memory_stream) pa_table = opened_stream.read_all() return pa_table def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table: stream = pa.BufferReader(buffer) opened_stream = pa.ipc.open_stream(stream) table = opened_stream.read_all() return table def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table: memory_mapped_stream = pa.memory_map(filename) opened_stream = pa.ipc.open_stream(memory_mapped_stream) pa_table = opened_stream.read_all() return pa_table def _write_table_to_file(table: pa.Table, filename: str) -> int: with open(filename, "wb") as sink: writer = pa.RecordBatchStreamWriter(sink=sink, schema=table.schema) batches: List[pa.RecordBatch] = table.to_batches() for batch in batches: writer.write_batch(batch) writer.close() return sum(batch.nbytes for batch in batches) def _deepcopy(x, memo: dict): """deepcopy a regular class instance""" cls = x.__class__ result = cls.__new__(cls) memo[id(x)] = result for k, v in x.__dict__.items(): setattr(result, k, copy.deepcopy(v, memo)) return result def _interpolation_search(arr: List[int], x: int) -> int: """ Return the position i of a sorted array so that arr[i] <= x < arr[i+1] Args: arr (:obj:`List[int]`): non-empty sorted list of integers x (:obj:`int`): query Returns: `int`: the position i so that arr[i] <= x < arr[i+1] Raises: `IndexError`: if the array is empty or if the query is outside the array values """ i, j = 0, len(arr) - 1 while i < j and arr[i] <= x < arr[j]: k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i])) if arr[k] <= x < arr[k + 1]: return k elif arr[k] < x: i, j = k + 1, j else: i, j = i, k raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.") class IndexedTableMixin: def __init__(self, table: pa.Table): self._schema = table.schema self._batches = [recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0] self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64) def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table: """ Create a pa.Table by gathering the records at the records at the specified indices. Should be faster than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices) since NumPy can compute the binary searches in parallel, highly optimized C """ assert len(indices), "Indices must be non-empty" batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1 return pa.Table.from_batches( [ self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1) for batch_idx, i in zip(batch_indices, indices) ], schema=self._schema, ) def fast_slice(self, offset=0, length=None) -> pa.Table: """ Slice the Table using interpolation search. The behavior is the same as :obj:`pyarrow.Table.slice` but it's significantly faster. 
Interpolation search is used to find the start and end indexes of the batches we want to keep. The batches to keep are then concatenated to form the sliced Table. """ if offset < 0: raise IndexError("Offset must be non-negative") elif offset >= self._offsets[-1] or (length is not None and length <= 0): return pa.Table.from_batches([], schema=self._schema) i = _interpolation_search(self._offsets, offset) if length is None or length + offset >= self._offsets[-1]: batches = self._batches[i:] batches[0] = batches[0].slice(offset - self._offsets[i]) else: j = _interpolation_search(self._offsets, offset + length - 1) batches = self._batches[i : j + 1] batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j]) batches[0] = batches[0].slice(offset - self._offsets[i]) return pa.Table.from_batches(batches, schema=self._schema) class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for InMemoryTable, MemoryMappedTable and ConcatenationTable. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns and drop. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def __getstate__(self): # We can't pickle objects that are bigger than 4GiB, or it causes OverflowError # So we write the table on disk instead if self.table.nbytes >= config.MAX_TABLE_NBYTES_FOR_PICKLING: table = self.table with tempfile.NamedTemporaryFile("wb", delete=False, suffix=".arrow") as tmp_file: filename = tmp_file.name logger.debug( f"Attempting to pickle a table bigger than 4GiB. 
Writing it on the disk instead at {filename}" ) _write_table_to_file(table=table, filename=filename) return {"path": filename} else: return {"table": self.table} def __setstate__(self, state): if "path" in state: filename = state["path"] logger.debug(f"Unpickling a big table from the disk at {filename}") table = _in_memory_arrow_table_from_file(filename) logger.debug(f"Removing temporary table file at {filename}") os.remove(filename) else: table = state["table"] Table.__init__(self, table) @inject_arrow_table_documentation(pa.Table.validate) def validate(self, *args, **kwargs): return self.table.validate(*args, **kwargs) @inject_arrow_table_documentation(pa.Table.equals) def equals(self, *args, **kwargs): args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) @inject_arrow_table_documentation(pa.Table.to_batches) def to_batches(self, *args, **kwargs): return self.table.to_batches(*args, **kwargs) @inject_arrow_table_documentation(pa.Table.to_pydict) def to_pydict(self, *args, **kwargs): return self.table.to_pydict(*args, **kwargs) @inject_arrow_table_documentation(pa.Table.to_pandas) def to_pandas(self, *args, **kwargs): return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) @inject_arrow_table_documentation(pa.Table.field) def field(self, *args, **kwargs): return self.table.field(*args, **kwargs) @inject_arrow_table_documentation(pa.Table.column) def column(self, *args, **kwargs): return self.table.column(*args, **kwargs) @inject_arrow_table_documentation(pa.Table.itercolumns) def itercolumns(self, *args, **kwargs): return self.table.itercolumns(*args, **kwargs) @property def schema(self): return self.table.schema @property def columns(self): return self.table.columns @property def num_columns(self): return self.table.num_columns @property def num_rows(self): return self.table.num_rows @property def shape(self): return self.table.shape @property def nbytes(self): return self.table.nbytes @property def column_names(self): return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) @inject_arrow_table_documentation(pa.Table.slice) def slice(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.filter) def filter(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.flatten) def flatten(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.combine_chunks) def combine_chunks(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.cast) def cast(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.replace_schema_metadata) def replace_schema_metadata(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.add_column) def add_column(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.append_column) def append_column(self, *args, **kwargs): raise NotImplementedError() 
@inject_arrow_table_documentation(pa.Table.remove_column) def remove_column(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.set_column) def set_column(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.rename_columns) def rename_columns(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.drop) def drop(self, *args, **kwargs): raise NotImplementedError() class TableBlock(Table): """ TableBlock is the allowed class inside a ConcanetationTable. Only MemoryMappedTable and InMemoryTable are TableBlock. This is because we don't want a ConcanetationTable made out of other ConcanetationTables. """ pass class InMemoryTable(TableBlock): """ The table is said in-memory when it is loaded into the user's RAM. Pickling it does copy all the data using memory. Its implementation is simple and uses the underlying pyarrow Table methods directly. This is different from the MemoryMapped table, for which pickling doesn't copy all the data in memory. For a MemoryMapped, unpickling instead reloads the table from the disk. InMemoryTable must be used when data fit in memory, while MemoryMapped are reserved for data bigger than memory or when you want the memory footprint of your application to stay low. """ @classmethod def from_file(cls, filename: str): table = _in_memory_arrow_table_from_file(filename) return cls(table) @classmethod def from_buffer(cls, buffer: pa.Buffer): table = _in_memory_arrow_table_from_buffer(buffer) return cls(table) @classmethod @inject_arrow_table_documentation(pa.Table.from_pandas) def from_pandas(cls, *args, **kwargs): return cls(pa.Table.from_pandas(*args, **kwargs)) @classmethod @inject_arrow_table_documentation(pa.Table.from_arrays) def from_arrays(cls, *args, **kwargs): return cls(pa.Table.from_arrays(*args, **kwargs)) @classmethod @inject_arrow_table_documentation(pa.Table.from_pydict) def from_pydict(cls, *args, **kwargs): return cls(pa.Table.from_pydict(*args, **kwargs)) @classmethod @inject_arrow_table_documentation(pa.Table.from_batches) def from_batches(cls, *args, **kwargs): return cls(pa.Table.from_batches(*args, **kwargs)) @inject_arrow_table_documentation(pa.Table.slice) def slice(self, offset=0, length=None): # Use fast slicing here return InMemoryTable(self.fast_slice(offset=offset, length=length)) @inject_arrow_table_documentation(pa.Table.filter) def filter(self, *args, **kwargs): return InMemoryTable(self.table.filter(*args, **kwargs)) @inject_arrow_table_documentation(pa.Table.flatten) def flatten(self, *args, **kwargs): return InMemoryTable(self.table.flatten(*args, **kwargs)) @inject_arrow_table_documentation(pa.Table.combine_chunks) def combine_chunks(self, *args, **kwargs): return InMemoryTable(self.table.combine_chunks(*args, **kwargs)) @inject_arrow_table_documentation(pa.Table.cast) def cast(self, *args, **kwargs): return InMemoryTable(self.table.cast(*args, **kwargs)) @inject_arrow_table_documentation(pa.Table.replace_schema_metadata) def replace_schema_metadata(self, *args, **kwargs): return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs)) @inject_arrow_table_documentation(pa.Table.add_column) def add_column(self, *args, **kwargs): return InMemoryTable(self.table.add_column(*args, **kwargs)) @inject_arrow_table_documentation(pa.Table.append_column) def append_column(self, *args, **kwargs): return InMemoryTable(self.table.append_column(*args, **kwargs)) 
@inject_arrow_table_documentation(pa.Table.remove_column) def remove_column(self, *args, **kwargs): return InMemoryTable(self.table.remove_column(*args, **kwargs)) @inject_arrow_table_documentation(pa.Table.set_column) def set_column(self, *args, **kwargs): return InMemoryTable(self.table.set_column(*args, **kwargs)) @inject_arrow_table_documentation(pa.Table.rename_columns) def rename_columns(self, *args, **kwargs): return InMemoryTable(self.table.rename_columns(*args, **kwargs)) @inject_arrow_table_documentation(pa.Table.drop) def drop(self, *args, **kwargs): return InMemoryTable(self.table.drop(*args, **kwargs)) # The MemoryMappedTable needs replays to properly reload tables from the disk Replay = Tuple[str, tuple, dict] class MemoryMappedTable(TableBlock): """ The table is said memory mapped when it doesn't use the user's RAM but loads the data from the disk instead. Pickling it doesn't copy the data into memory. Instead, only the path to the memory mapped arrow file is pickled, as well as the list of transforms to "replay" when reloading the table from the disk. Its implementation requires to store an history of all the transforms that were applied to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table from the disk. This is different from the InMemoryTable table, for which pickling does copy all the data in memory. InMemoryTable must be used when data fit in memory, while MemoryMapped are reserved for data bigger than memory or when you want the memory footprint of your application to stay low. """ def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None): super().__init__(table) self.path = path self.replays: List[Replay] = replays if replays is not None else [] @classmethod def from_file(cls, filename: str, replays=None): table = _memory_mapped_arrow_table_from_file(filename) table = cls._apply_replays(table, replays) return cls(table, filename, replays) def __getstate__(self): return {"path": self.path, "replays": self.replays} def __setstate__(self, state): path = state["path"] replays = state["replays"] table = _memory_mapped_arrow_table_from_file(path) table = self._apply_replays(table, replays) MemoryMappedTable.__init__(self, table, path=path, replays=replays) @staticmethod def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table: if replays is not None: for name, args, kwargs in replays: table = getattr(table, name)(*args, **kwargs) return table def _append_replay(self, replay: Replay) -> List[Replay]: replays = copy.deepcopy(self.replays) replays.append(replay) return replays @inject_arrow_table_documentation(pa.Table.slice) def slice(self, offset=0, length=None): replay = ("slice", (offset, length), {}) replays = self._append_replay(replay) # Use fast slicing here return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays) @inject_arrow_table_documentation(pa.Table.filter) def filter(self, *args, **kwargs): replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays) @inject_arrow_table_documentation(pa.Table.flatten) def flatten(self, *args, **kwargs): replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.flatten(*args, **kwargs), self.path, replays) @inject_arrow_table_documentation(pa.Table.combine_chunks) def combine_chunks(self, *args, 
**kwargs): replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays) @inject_arrow_table_documentation(pa.Table.cast) def cast(self, *args, **kwargs): replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.cast(*args, **kwargs), self.path, replays) @inject_arrow_table_documentation(pa.Table.replace_schema_metadata) def replace_schema_metadata(self, *args, **kwargs): replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays) @inject_arrow_table_documentation(pa.Table.add_column) def add_column(self, *args, **kwargs): replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays) @inject_arrow_table_documentation(pa.Table.append_column) def append_column(self, *args, **kwargs): replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays) @inject_arrow_table_documentation(pa.Table.remove_column) def remove_column(self, *args, **kwargs): replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays) @inject_arrow_table_documentation(pa.Table.set_column) def set_column(self, *args, **kwargs): replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays) @inject_arrow_table_documentation(pa.Table.rename_columns) def rename_columns(self, *args, **kwargs): replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays) @inject_arrow_table_documentation(pa.Table.drop) def drop(self, *args, **kwargs): replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays) # A ConcatenationTable is the concatenation of several tables. # The ``blocks`` attributes stores a list of list of blocks. # The first axis concatenates the tables along the axis 0 (it appends rows), # while the second axis concatenates tables along the axis 1 (it appends columns). TableBlockContainer = TypeVar("TableBlockContainer", TableBlock, List[TableBlock], List[List[TableBlock]]) class ConcatenationTable(Table): """ The table comes from the concatenation of several tables called blocks. It enables concatenation on both axis 0 (append rows) and axis 1 (append columns). The underlying tables are called "blocks" and can be either InMemoryTable or MemoryMappedTable objects. This allows to combine tables that come from memory or that are memory mapped. When a ConcatenationTable is pickled, then each block is pickled: - the InMemoryTable objects are pickled by copying all the data in memory; - the MemoryMappedTable objects are pickled without copying the data into memory. 
Instead, only the path to the memory mapped arrow file is pickled, as well as the list of transforms to "replays" when reloading the table from the disk. Its implementation requires to store each block separately. The ``blocks`` attributes stores a list of list of blocks. The first axis concatenates the tables along the axis 0 (it appends rows), while the second axis concatenates tables along the axis 1 (it appends columns). You can access the fully combined table by accessing the ConcatenationTable.table attribute, and the blocks by accessing the ConcatenationTable.blocks attribute. """ def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]): super().__init__(table) self.blocks = blocks # Check that all the blocks have the right type. # Only InMemoryTable and MemoryMappedTable are allowed. for subtables in blocks: for subtable in subtables: if not isinstance(subtable, TableBlock): raise TypeError( "The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects" f", but got {subtable}." ) def __getstate__(self): return {"blocks": self.blocks} def __setstate__(self, state): blocks = state["blocks"] table = self._concat_blocks_horizontally_and_vertically(blocks) ConcatenationTable.__init__(self, table, blocks=blocks) @staticmethod def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table: pa_tables = [table.table if hasattr(table, "table") else table for table in blocks] if axis == 0: # Align schemas: re-order the columns to make the schemas match before concatenating over rows schema = pa_tables[0].schema pa_tables = [ table if table.schema == schema else pa.Table.from_arrays([table[name] for name in schema.names], names=schema.names) for table in pa_tables ] return pa.concat_tables(pa_tables) elif axis == 1: for i, table in enumerate(pa_tables): if i == 0: pa_table = table else: for name, col in zip(table.column_names, table.columns): pa_table = pa_table.append_column(name, col) return pa_table else: raise ValueError("'axis' must be either 0 or 1") @classmethod def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table: pa_tables_to_concat_vertically = [] for i, tables in enumerate(blocks): if not tables: continue pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1) pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated) return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0) @classmethod def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer: if axis is not None: merged_blocks = [] for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)): if is_in_memory: block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))] merged_blocks += list(block_group) else: # both merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks] if all(len(row_block) == 1 for row_block in merged_blocks): merged_blocks = cls._merge_blocks( [block for row_block in merged_blocks for block in row_block], axis=0 ) return merged_blocks @classmethod def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer: if isinstance(blocks, TableBlock): return blocks elif isinstance(blocks[0], TableBlock): return cls._merge_blocks(blocks, axis=0) else: return cls._merge_blocks(blocks) @classmethod def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable": blocks = cls._consolidate_blocks(blocks) if isinstance(blocks, 
TableBlock): table = blocks return cls(table.table, [[table]]) elif isinstance(blocks[0], TableBlock): table = cls._concat_blocks(blocks, axis=0) blocks = [[t] for t in blocks] return cls(table, blocks) else: table = cls._concat_blocks_horizontally_and_vertically(blocks) return cls(table, blocks) @classmethod def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable": """Create ConcatenationTable from list of tables. Args: tables (list of :class:`Table` or list of :obj:`pyarrow.Table`): List of tables. axis: (``{0, 1}``, default ``0``, meaning over rows): Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns (horizontally). .. versionadded:: 1.6.0 """ def to_blocks(table): if isinstance(table, pa.Table): return [[InMemoryTable(table)]] elif isinstance(table, ConcatenationTable): return copy.deepcopy(table.blocks) else: return [[table]] def _split_like(blocks_to_split, blocks_like): splits = [] offset = 0 for block_row in blocks_like: length = block_row[0].num_rows splits.append((offset, length)) offset += length return [ [block.slice(offset=split[0], length=split[1]) for block in blocks_to_split[0]] for split in splits ] def _extend_blocks(result, blocks: List[List[TableBlock]], axis: int = 0): if axis == 0: result.extend(blocks) elif axis == 1: if len(result) == 1 and len(blocks) > 1: result = _split_like(result, blocks) # Split result elif len(blocks) == 1 and len(result) > 1: blocks = _split_like(blocks, result) # Split blocks # TODO: This assumes each block_row has the same num_rows for i, row_blocks in enumerate(blocks): result[i].extend(row_blocks) return result blocks = to_blocks(tables[0]) for table in tables[1:]: table_blocks = to_blocks(table) blocks = _extend_blocks(blocks, table_blocks, axis=axis) return cls.from_blocks(blocks) @property def _slices(self): offset = 0 for tables in self.blocks: length = len(tables[0]) yield (offset, length) offset += length @inject_arrow_table_documentation(pa.Table.slice) def slice(self, offset=0, length=None): table = self.table.slice(offset, length=length) length = length if length is not None else self.num_rows - offset blocks = [] for tables in self.blocks: n_rows = len(tables[0]) if length == 0: break elif n_rows <= offset: offset = offset - n_rows elif n_rows <= offset + length: blocks.append([t.slice(offset) for t in tables]) length, offset = length + offset - n_rows, 0 else: blocks.append([t.slice(offset, length) for t in tables]) length, offset = 0, 0 return ConcatenationTable(table, blocks) @inject_arrow_table_documentation(pa.Table.filter) def filter(self, mask, *args, **kwargs): table = self.table.filter(mask, *args, **kwargs) blocks = [] for (offset, length), tables in zip(self._slices, self.blocks): submask = mask.slice(offset, length) blocks.append([t.filter(submask, *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) @inject_arrow_table_documentation(pa.Table.flatten) def flatten(self, *args, **kwargs): table = self.table.flatten(*args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.flatten(*args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) @inject_arrow_table_documentation(pa.Table.combine_chunks) def combine_chunks(self, *args, **kwargs): table = self.table.combine_chunks(*args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.combine_chunks(*args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) @inject_arrow_table_documentation(pa.Table.cast) 
def cast(self, target_schema, *args, **kwargs): table = self.table.cast(target_schema, *args, **kwargs) blocks = [] for subtables in self.blocks: new_tables = [] fields = list(target_schema) for subtable in subtables: subfields = [] for name in subtable.column_names: subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name))) subschema = pa.schema(subfields) new_tables.append(subtable.cast(subschema, *args, **kwargs)) blocks.append(new_tables) return ConcatenationTable(table, blocks) @inject_arrow_table_documentation(pa.Table.replace_schema_metadata) def replace_schema_metadata(self, *args, **kwargs): table = self.table.replace_schema_metadata(*args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) @inject_arrow_table_documentation(pa.Table.add_column) def add_column(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.append_column) def append_column(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.remove_column) def remove_column(self, i, *args, **kwargs): table = self.table.remove_column(i, *args, **kwargs) name = self.table.column_names[i] blocks = [] for tables in self.blocks: blocks.append( [ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t for t in tables ] ) return ConcatenationTable(table, blocks) @inject_arrow_table_documentation(pa.Table.set_column) def set_column(self, *args, **kwargs): raise NotImplementedError() @inject_arrow_table_documentation(pa.Table.rename_columns) def rename_columns(self, names, *args, **kwargs): table = self.table.rename_columns(names, *args, **kwargs) names = dict(zip(self.table.column_names, names)) blocks = [] for tables in self.blocks: blocks.append( [t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables] ) return ConcatenationTable(table, blocks) @inject_arrow_table_documentation(pa.Table.drop) def drop(self, columns, *args, **kwargs): table = self.table.drop(columns) blocks = [] for tables in self.blocks: blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def concat_tables(tables: List[Table], axis: int = 0) -> Table: """ Concatenate tables. Args: tables (list of :class:`Table`): List of tables to be concatenated. axis (``{0, 1}``, default ``0``, meaning over rows): Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns (horizontally). .. versionadded:: 1.6.0 Returns: :obj:`datasets.table.Table` that is the concatenated table: If the number of input tables is > 1, then the returned table is a :obj:`datasets.table.ConcatenationTable`. Otherwise, if there's only one table, it is returned as is. """ tables = list(tables) if len(tables) == 1: return tables[0] return ConcatenationTable.from_tables(tables, axis=axis) def list_table_cache_files(table: Table) -> List[str]: """ Get the cache files that are loaded by the table. Cache files are used when parts of the table come from the disk via memory mapping. 
Returns: :obj:`List[str]`: a list of paths to the cache files loaded by the table """ if isinstance(table, ConcatenationTable): cache_files = [] for subtables in table.blocks: for subtable in subtables: cache_files += list_table_cache_files(subtable) return cache_files elif isinstance(table, MemoryMappedTable): return [table.path] else: return [] def cast_with_sliced_list_support(pa_table: pa.Table, schema: pa.Schema) -> pa.Table: """Same as pyarrow.Table.cast, except it works for sliced list arrays""" def wrap_for_chunked_arrays(func): """Apply the function on each chunk of a pyarrow.ChunkedArray, or on the array directly""" def wrapper(array): if isinstance(array, pa.ChunkedArray): return pa.chunked_array([func(chunk) for chunk in array.chunks]) else: return func(array) return wrapper @wrap_for_chunked_arrays def reset_sliced_list_offset(array: pa.ListArray): """Return the same pyarrow.ListArray but with array.offset == 0 for compatibility with cast""" if array.offset == 0: return array elif len(array) == 0: return array.values.slice(0, 0) else: values_offset = array.offsets[0] # the relevant values start at this index new_values = array.values.slice(values_offset.as_py()) # get the values to start at the right position new_offsets = pc.subtract(array.offsets, values_offset) # update the offsets accordingly return pa.ListArray.from_arrays(new_offsets, new_values) arrays = [reset_sliced_list_offset(array) if isinstance(array.type, pa.ListType) else array for array in pa_table] return pa.Table.from_arrays(arrays, schema=schema)
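# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module): how `concat_tables`
# combines tables over rows (axis=0) and columns (axis=1). It assumes this
# module is importable as `datasets.table`; the table contents are made up
# for the example.
#
# import pyarrow as pa
# from datasets.table import InMemoryTable, concat_tables
#
# t1 = InMemoryTable(pa.table({"a": [1, 2]}))
# t2 = InMemoryTable(pa.table({"a": [3, 4]}))
# vertical = concat_tables([t1, t2], axis=0)            # 4 rows, column "a"
# t3 = InMemoryTable(pa.table({"b": [10, 20, 30, 40]}))
# combined = concat_tables([vertical, t3], axis=1)      # adds column "b"
# assert combined.column_names == ["a", "b"] and combined.num_rows == 4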
# -*- coding: utf-8 -*- # ------------------------------------------------------------------ # Filename: <filename> # Purpose: <purpose> # Author: <author> # Email: <email> # # Copyright (C) <copyright> # -------------------------------------------------------------------- """ :copyright: <copyright> :license: GNU Lesser General Public License, Version 3 (http://www.gnu.org/copyleft/lesser.html) """ import numpy as np import scipy.ndimage from .base import Grid from pathlib import Path from uuid import uuid4 import matplotlib.pyplot as plt from loguru import logger import skfmm from multiprocessing import Pool, cpu_count from functools import partial from typing import Optional import h5py from .base import ray_tracer import shutil from uquake.grid import read_grid from .hdf5 import H5TTable, write_hdf5 from scipy.interpolate import interp1d __cpu_count__ = cpu_count() valid_phases = ('P', 'S') valid_grid_types = ( 'VELOCITY', 'VELOCITY_METERS', 'SLOWNESS', 'VEL2', 'SLOW2', 'SLOW2_METERS', 'SLOW_LEN', 'STACK', 'TIME', 'TIME2D', 'PROB_DENSITY', 'MISFIT', 'ANGLE', 'ANGLE2D' ) valid_float_types = { # NLL_type: numpy_type 'FLOAT': 'float32', 'DOUBLE': 'float64' } valid_grid_units = ( 'METER', 'KILOMETER', ) __velocity_grid_location__ = Path('model') __time_grid_location__ = Path('time') __default_grid_units__ = 'METER' __default_float_type__ = 'FLOAT' def validate_phase(phase): if phase not in valid_phases: msg = f'phase should be one of the following valid phases:\n' for valid_phase in valid_phases: msg += f'{valid_phase}\n' raise ValueError(msg) return True def validate_grid_type(grid_type): if grid_type.upper() not in valid_grid_types: msg = f'grid_type = {grid_type} is not valid\n' \ f'grid_type should be one of the following valid grid ' \ f'types:\n' for valid_grid_type in valid_grid_types: msg += f'{valid_grid_type}\n' raise ValueError(msg) return True def validate_grid_units(grid_units): if grid_units.upper() not in valid_grid_units: msg = f'grid_units = {grid_units} is not valid\n' \ f'grid_units should be one of the following valid grid ' \ f'units:\n' for valid_grid_unit in valid_grid_units: msg += f'{valid_grid_unit}\n' raise ValueError(msg) return True def validate_float_type(float_type): if float_type.upper() not in valid_float_types.keys(): msg = f'float_type = {float_type} is not valid\n' \ f'float_type should be one of the following valid float ' \ f'types:\n' for valid_float_type in valid_float_types: msg += f'{valid_float_type}\n' raise ValueError(msg) return True def validate(value, choices): if value not in choices: msg = f'value should be one of the following choices\n:' for choice in choices: msg += f'{choice}\n' raise ValueError(msg) return True class Seeds: __valid_measurement_units__ = ['METERS', 'KILOMETERS'] def __init__(self, sites=[], units='METERS'): """ specifies a series of source location from an inventory object :param sites: a list of sites containing at least the location, and site label :type sites: list of dictionary :Example: >>> site = {'label': 'test', 'x': 1000, 'y': 1000, 'z': 1000, 'elev': 0.0} >>> sites = [site] >>> seeds = Seeds(sites) """ validate(units, self.__valid_measurement_units__) self.units = units self.sites = sites @classmethod def from_inventory(cls, inventory): """ create from an inventory object :param inventory: :type inventory: uquake.core.inventory.Inventory """ srces = [] for site in inventory.sites: srce = {'label': site.code, 'x': site.x, 'y': site.y, 'z': site.z, 'elev': 0} srces.append(srce) return cls(srces) @classmethod def 
from_json(cls, json): pass def add(self, label, x, y, z, elev=0, units='METERS'): """ Add a single site to the source list :param label: site label :type label: str :param x: x location relative to geographic origin expressed in the units of measurements for site/source :type x: float :param y: y location relative to geographic origin expressed in the units of measurements for site/source :type y: float :param z: z location relative to geographic origin expressed in the units of measurements for site/source :type z: float :param elev: elevation above z grid position (positive UP) in kilometers for site (Default = 0) :type elev: float :param units: units of measurement used to express x, y, and z ( 'METERS' or 'KILOMETERS') """ validate(units.upper(), self.__valid_measurement_units__) self.sites.append({'label': label, 'x': x, 'y': y, 'z': z, 'elev': elev}) self.units = units.upper() @classmethod def generate_random_seeds_in_grid(cls, grid, n_seeds=1): """ generate n_seeds random seeds inside the grid provided. This function is mainly used for testing purposes :param grid: a grid :type grid: uquake.grid.base.Grid or an object inheriting from Grid :param n_seeds: number of seeds to generate :return: a list of seeds >>> from uquake.grid.base import Grid >>> from uquake.grid.nlloc import Seeds >>> grid_dimensions = [10, 10, 10] >>> grid_spacing = [1, 1, 1] >>> grid_origin = [0, 0, 0] >>> grid = Grid(grid_dimensions, grid_spacing, grid_origin, value=1) >>> seeds = Seeds.generate_random_seeds_in_grid(grid, n_seeds=10) """ seeds = cls() label_root = 'seed' for i, point in enumerate(grid.generate_random_points_in_grid( n_points=n_seeds)): label = f'{label_root}_{i}' seeds.add(label, point[0], point[1], point[2]) return seeds def __repr__(self): line = "" for site in self.sites: # test if site name is shorter than 6 characters line += f'GTSRCE {site["label"]} XYZ ' \ f'{site["x"] / 1000:>15.6f} ' \ f'{site["y"] / 1000:>15.6f} ' \ f'{site["z"] / 1000:>15.6f} ' \ f'0.00\n' return line @property def locs(self): seeds = [] for site in self.sites: seeds.append([site['x'], site['y'], site['z']]) return np.array(seeds) @property def labels(self): seed_labels = [] for site in self.sites: seed_labels.append(site['label']) return np.array(seed_labels) # class Srces(Seeds): # def __init__(self, sites=[], units='METERS'): # super().__init__(sites=sites, units=units) class NLLocGrid(Grid): """ base 3D rectilinear grid object """ def __init__(self, data_or_dims, origin, spacing, phase, value=0, grid_type='VELOCITY_METERS', grid_units=__default_grid_units__, float_type="FLOAT", model_id=None): """ :param data_or_dims: data or data dimensions. 
If dimensions are provided, a homogeneous grid is created with value=value :param origin: origin of the grid :type origin: list :param spacing: the spacing between grid nodes :type spacing: list :param phase: the uquake phase (value 'P' or 'S') :type phase: str :param value: :type value: float :param grid_type: :type grid_type: str :param grid_units: :type grid_units: str :param float_type: :type float_type: str :param model_id: :type model_id: str """ super().__init__(data_or_dims, spacing=spacing, origin=origin, value=value, resource_id=model_id) if validate_phase(phase): self.phase = phase.upper() if validate_grid_type(grid_type): self.grid_type = grid_type.upper() self.extensions = ['.buf', '.mid', '.hdr'] if validate_grid_units(grid_units): self.grid_units = grid_units.upper() if validate_float_type(float_type): self.float_type = float_type.upper() def _write_grid_data(self, base_name, path='.'): Path(path).mkdir(parents=True, exist_ok=True) with open(Path(path) / (base_name + '.buf'), 'wb') \ as out_file: if self.float_type == 'FLOAT': out_file.write(self.data.astype(np.float32).tobytes()) elif self.float_type == 'DOUBLE': out_file.write(self.data.astype(np.float64).tobytes()) def _write_grid_header(self, base_name, path='.', seed_label=None, seed=None, seed_units=None): # convert 'METER' to 'KILOMETER' if self.grid_units == 'METER': origin = self.origin / 1000 spacing = self.spacing / 1000 else: origin = self.origin spacing = self.spacing line1 = f'{self.shape[0]:d} {self.shape[1]:d} {self.shape[2]:d} ' \ f'{origin[0]:f} {origin[1]:f} {origin[2]:f} ' \ f'{spacing[0]:f} {spacing[1]:f} {spacing[2]:f} ' \ f'{self.grid_type}\n' with open(Path(path) / (base_name + '.hdr'), 'w') as out_file: out_file.write(line1) if self.grid_type in ['TIME', 'ANGLE']: if seed_units is None: logger.warning(f'seed_units are not defined. 
' f'Assuming same units as grid (' f'{self.grid_units}') if self.grid_units == 'METER': seed = seed / 1000 line2 = u"%s %f %f %f\n" % (seed_label, seed[0], seed[1], seed[2]) out_file.write(line2) out_file.write(u'TRANSFORM NONE\n') return True def _write_grid_model_id(self, base_name, path='.'): with open(Path(path) / (base_name + '.mid'), 'w') as out_file: out_file.write(f'{self.model_id}') return True def write(self, base_name, path='.'): self._write_grid_data(base_name, path=path) self._write_grid_header(base_name, path=path) self._write_grid_model_id(base_name, path=path) return True def mv(self, base_name, origin, destination): """ move a NLLoc grid with a certain base_name from an origin to a destination :param NLLocGridObject: :type NLLocGridObject: uquake.grid.nlloc.NLLocGrid :param base_name: :type base_name: str :param origin: :type origin: str :param destination: :type destination: str :return: """ self.write(base_name, destination) for ext in self.extensions: shutil.move(f'{origin}/{base_name}.{ext}', f'{destination}/{base_name}.{ext}') @property def model_id(self): return self.resource_id class ModelLayer: """ 1D model varying in Z """ def __init__(self, z_top, value_top): """ :param z_top: Top of the layer z coordinates :param value_top: Value at the top of the layer """ self.z_top = z_top self.value_top = value_top def __repr__(self): return f'top - {self.z_top:5d} | value - {self.value_top:5d}\n' class LayeredVelocityModel(object): def __init__(self, model_id=None, velocity_model_layers=None, phase='P', grid_units='METER', float_type=__default_float_type__, gradient=False): """ Initialize :param model_id: model id, if not set the model ID is set using UUID :type model_id: str :param velocity_model_layers: a list of VelocityModelLayer :type velocity_model_layers: list :param phase: Phase either 'P' or 'S' :type phase: str """ if velocity_model_layers is None: self.velocity_model_layers = [] if validate_phase(phase): self.phase = phase.upper() if validate_grid_units(grid_units): self.grid_units = grid_units.upper() if validate_float_type(float_type): self.float_type = float_type.upper() self.grid_type = 'VELOCITY' if model_id is None: model_id = str(uuid4()) self.model_id = model_id self.gradient = gradient def __repr__(self): output = '' for i, layer in enumerate(self.velocity_model_layers): output += f'layer {i + 1:4d} | {layer}' return output def add_layer(self, layer): """ Add a layer to the model. 
The layers must be added in sequence from the top to the bottom :param layer: a LayeredModel object """ if not (type(layer) is ModelLayer): raise TypeError('layer must be a VelocityModelLayer object') if self.velocity_model_layers is None: self.velocity_model_layers = [layer] else: self.velocity_model_layers.append(layer) def gen_1d_model(self, z_min, z_max, spacing): # sort the layers to ensure the layers are properly ordered z = [] v = [] for layer in self.velocity_model_layers: z.append(layer.z_top) v.append(layer.value_top) if np.max(z) < z_max: i_z_max = np.argmax(z) v_z_max = v[i_z_max] z.append(z_max) v.append(v_z_max) if np.min(z) > z_min: i_z_min = np.argmin(z) v_z_min = v[i_z_min] z.append(z_min) v.append(v_z_min) i_sort = np.argsort(z) z = np.array(z) v = np.array(v) z = z[i_sort] v = v[i_sort] z_interp = np.arange(z_min, z_max, spacing[2]) kind = 'previous' if self.gradient: kind = 'linear' f_interp = interp1d(z, v, kind=kind) v_interp = f_interp(z_interp) return z_interp, v_interp def gen_3d_grid(self, network_code, dims, origin, spacing): model_grid_3d = VelocityGrid3D.from_layered_model(self, network_code, dims, origin, spacing) return model_grid_3d def plot(self, z_min, z_max, spacing, *args, **kwargs): """ Plot the 1D velocity model :param z_min: lower limit of the model :param z_max: upper limit of the model :param spacing: plotting resolution in z :return: matplotlib axis """ z_interp, v_interp = self.gen_1d_model(z_min, z_max, spacing) x_label = None if self.phase == 'P': x_label = 'P-wave velocity' elif self.phase == 'S': x_label = 's-wave velocity' if self.grid_units == 'METER': units = 'm' else: units = 'km' y_label = f'z [{units}]' ax = plt.axes() ax.plot(v_interp, z_interp, *args, **kwargs) plt.xlabel(x_label) plt.ylabel(y_label) ax.set_aspect(2) plt.tight_layout() return ax class VelocityGrid3D(NLLocGrid): def __init__(self, network_code, data_or_dims, origin, spacing, phase='P', value=0, float_type=__default_float_type__, model_id=None, **kwargs): self.network_code = network_code if (type(spacing) is int) | (type(spacing) is float): spacing = [spacing, spacing, spacing] super().__init__(data_or_dims, origin, spacing, phase, value=value, grid_type='VELOCITY_METERS', grid_units='METER', float_type=float_type, model_id=model_id) @staticmethod def get_base_name(network_code, phase): """ return the base name given a network code and a phase :param network_code: Code of the network :type network_code: str :param phase: Phase, either P or S :type phase: str either 'P' or 'S' :return: the base name """ validate_phase(phase) return f'{network_code.upper()}.{phase.upper()}.mod' @classmethod def from_ocd(cls, origin, corner, dimensions, val=0): pass @classmethod def from_ocs(cls, origin, corner, spacing, val=0): pass @classmethod def from_ocd(cls, origin, dimensions, spacing, val=0): pass @classmethod def from_layered_model(cls, layered_model, network_code, dims, origin, spacing, **kwargs): """ Generating a 3D grid model from :param network_code: :param layered_model: :param dims: :param origin: :param spacing: :param kwargs: :return: """ z_min = origin[-1] z_max = z_min + spacing[-1] * dims[-1] z_interp, v_interp = layered_model.gen_1d_model(z_min, z_max, spacing) data = np.zeros(dims) for i, v in enumerate(v_interp): data[:, :, i] = v_interp[i] return cls(network_code, data, origin, spacing, phase=layered_model.phase, float_type=layered_model.float_type, model_id=layered_model.model_id, **kwargs) def to_slow_lens(self): data = self.spacing[0] / self.data return 
NLLocGrid(data, self.origin, self.spacing, self.phase, grid_type='SLOW_LEN', grid_units=self.grid_units, float_type=self.float_type, model_id=self.model_id) @classmethod def from_slow_len(cls, grid: NLLocGrid, network_code: str): data = np.mean(grid.spacing) / grid.data return cls(network_code, data, grid.origin, grid.spacing, phase=grid.phase, float_type=grid.float_type, model_id=grid.model_id) def to_time(self, seed, seed_label, sub_grid_resolution=0.1, *args, **kwargs): """ Eikonal solver based on scikit fast marching solver :param seed: numpy array location of the seed or origin of useis wave in model coordinates (usually location of a station or an event) :type seed: numpy.array or list :param seed_label: seed label (name of station) :type seed_label: basestring :param sub_grid_resolution: resolution of the grid around the seed. Propagating the wavefront on a denser grid around the seed, significantly improves the travel time accuracy. The value represents a fraction of the grid resolution. For instance, assuming a grid with spacing of 10m, if the sub_grid_resolution is set to 0.1, the resolution around the grid will be 1m. :rtype: TTGrid """ if isinstance(seed, list): seed = np.array(seed) if not self.in_grid(seed): logger.warning(f'{seed_label} is outside the grid. ' f'The travel time grid will not be calculated') return origin = self.origin shape = self.shape spacing = self.spacing sub_grid_spacing = spacing * sub_grid_resolution # extent = ((4 * sub_grid_spacing) * 1.2 + sub_grid_spacing) n_pts_inner_grid = (4 * spacing / sub_grid_spacing * 1.2).astype(int) for i in range(0, len(n_pts_inner_grid)): if n_pts_inner_grid[i] % 2: n_pts_inner_grid[i] += 1 x_i = np.arange(0, n_pts_inner_grid[0]) * sub_grid_spacing[0] y_i = np.arange(0, n_pts_inner_grid[1]) * sub_grid_spacing[1] z_i = np.arange(0, n_pts_inner_grid[2]) * sub_grid_spacing[2] x_i = x_i - np.mean(x_i) + seed[0] y_i = y_i - np.mean(y_i) + seed[1] z_i = z_i - np.mean(z_i) + seed[2] X_i, Y_i, Z_i = np.meshgrid(x_i, y_i, z_i, indexing='ij') coords = np.array([X_i.ravel(), Y_i.ravel(), Z_i.ravel()]).T vel = self.interpolate(coords, grid_space=False).reshape( X_i.shape) phi = np.ones_like(X_i) phi[int(np.floor(len(x_i) / 2)), int(np.floor(len(y_i) / 2)), int(np.floor(len(z_i) / 2))] = 0 tt_tmp = skfmm.travel_time(phi, vel, dx=sub_grid_spacing) tt_tmp_grid = TTGrid(self.network_code, tt_tmp, [x_i[0], y_i[0], z_i[0]], sub_grid_spacing, seed, seed_label, phase=self.phase, float_type=self.float_type, model_id=self.model_id, grid_units=self.grid_units) data = self.data xe = origin[0] + np.arange(0, shape[0], 1) * spacing[0] ye = origin[1] + np.arange(0, shape[1], 1) * spacing[1] ze = origin[2] + np.arange(0, shape[2], 1) * spacing[2] Xe, Ye, Ze = np.meshgrid(xe, ye, ze, indexing='ij') coords = np.array([Xe.ravel(), Ye.ravel(), Ze.ravel()]) corner1 = np.array([np.min(x_i), np.min(y_i), np.min(z_i)]) corner2 = np.array([np.max(x_i), np.max(y_i), np.max(z_i)]) test = ((coords[0, :] >= corner1[0]) & (coords[0, :] <= corner2[0]) & (coords[1, :] >= corner1[1]) & (coords[1, :] <= corner2[1]) & (coords[2, :] >= corner1[2]) & (coords[2, :] <= corner2[2])) Xe_grid = Xe.ravel()[test] Ye_grid = Ye.ravel()[test] Ze_grid = Ze.ravel()[test] X = np.array([Xe_grid, Ye_grid, Ze_grid]).T tt_interp = tt_tmp_grid.interpolate(X, grid_space=False, order=3)[0] bias = np.max(tt_interp) phi_out = np.ones_like(Xe).ravel() phi_out[test] = tt_interp - bias phi_out = phi_out.reshape(Xe.shape) tt_out = skfmm.travel_time(phi_out, data, dx=spacing) # tt_out = 
tt_out.ravel() + bias tt_out = tt_out.ravel() + bias tt_out[test] = tt_interp tt_out = tt_out.reshape(Xe.shape) tt_out_grid = TTGrid(self.network_code, tt_out, self.origin, self.spacing, seed, seed_label, phase=self.phase, float_type=self.float_type, model_id=self.model_id, grid_units=self.grid_units) tt_out_grid.data -= tt_out_grid.interpolate(seed.T, grid_space=False, order=3)[0] return tt_out_grid def to_time_multi_threaded(self, seeds, seed_labels, cpu_utilisation=0.9, *args, **kwargs): """ Multi-threaded version of the Eikonal solver based on scikit fast marching solver :param seeds: array of seed :type seeds: np.array :param seed_labels: array of seed_labels :type seed_labels: np.array :param cpu_utilisation: fraction of the cpu core to be used for the processing task (between 0 and 1) :type cpu_utilisation: float between 0 and 1 :param args: arguments to be passed directly to skfmm.travel_time function :param kwargs: keyword arguments to be passed directly to skfmm.travel_time function :return: a travel time grid ensemble :rtype: TravelTimeEnsemble """ num_threads = int(np.ceil(cpu_utilisation * __cpu_count__)) # ensuring that the number of threads is comprised between 1 and # __cpu_count__ num_threads = np.max([np.min([num_threads, __cpu_count__]), 1]) data = [] for seed, seed_label in zip(seeds, seed_labels): if not self.in_grid(seed): logger.warning(f'{seed_label} is outside the grid. ' f'The travel time grid will not be calculated') continue data.append((seed, seed_label)) with Pool(num_threads) as pool: results = pool.starmap(self.to_time, data) tt_grid_ensemble = TravelTimeEnsemble(results) return tt_grid_ensemble def write(self, path='.'): base_name = self.base_name super().write(base_name, path=path) def mv(self, origin, destination): """ move the velocity grid files from {origin} to {destination} :param origin: origin :param destination: :return: """ super().mv(self.base_name, origin, destination) @property def base_name(self): return self.get_base_name(self.network_code, self.phase) class VelocityGridEnsemble: def __init__(self, p_velocity_grid, s_velocity_grid): """ :param p_velocity_grid: p-wave 3D velocity grid :type p_velocity_grid: VelocityGrid3D :param s_velocity_grid: s-wave 3D velocity grid :type s_velocity_grid: VelocityGrid3D """ self.p_velocity_grid = p_velocity_grid self.s_velocity_grid = s_velocity_grid self.__i__ = 0 def __getitem__(self, item): if item.upper() == 'P': return self.p_velocity_grid elif item.upper() == 'S': return self.s_velocity_grid else: raise ValueError(f'{item} is not a valid key. 
' f'The key value must either be "P" or "S"') def __iter__(self): self.__i__ = 0 return self def __next__(self): if self.__i__ < 2: if self.__i__ == 0: result = self.p_velocity_grid else: result = self.s_velocity_grid self.__i__ += 1 return result else: raise StopIteration # @property # def keys(self): # return ['P', 'S'] def keys(self): return ['P', 'S'] def write(self, path='.'): for key in self.keys(): self[key].write(path=path) def to_time_multi_threaded(self, seeds, seed_labels, cpu_utilisation=0.9, *args, **kwargs): tt_grid_ensemble = TravelTimeEnsemble([]) for key in self.keys(): tt_grids = self[key].to_time_multi_threaded(seeds, seed_labels, cpu_utilisation=cpu_utilisation, *args, **kwargs) tt_grid_ensemble += tt_grids return tt_grid_ensemble def to_time(self, seeds, seed_labels, multi_threaded=False, sub_grid_resolution=0.1, *args, **kwargs): """ Convert the velocity grids to travel-time :param seeds: a list of seeds usually represents site location :type seeds: numpy.array :param seed_labels: a list of seed labels, usually represents site codes :type seed_labels: list :param multi_threaded: if true, the travel-time grids are computed using multithreading :param sub_grid_resolution: sub grid resolution for near source solution in fraction of grid resolution :param args: :param kwargs: :return: Travel time grid ensemble :rtype: ~uquake.grid.nlloc.TravelTimeEnsemble """ if multi_threaded: return self.to_time_multi_threaded(seeds, seed_labels, sub_grid_resolution=sub_grid_resolution, *args, **kwargs) travel_time_grids = [] for seed, seed_label in zip(seeds, seed_labels): for key in self.keys(): tg = self[key].to_time(seed, seed_label, sub_grid_resolution=sub_grid_resolution, *args, **kwargs) travel_time_grids.append(tg) return TravelTimeEnsemble(travel_time_grids) class SeededGrid(NLLocGrid): """ container for seeded grids (e.g., travel time, azimuth and take off angle) """ __valid_grid_type__ = ['TIME', 'TIME2D', 'ANGLE', 'ANGLE2D'] def __init__(self, network_code, data_or_dims, origin, spacing, seed, seed_label, phase='P', value=0, grid_units=__default_grid_units__, grid_type='TIME', float_type="FLOAT", model_id=None): self.seed = seed self.seed_label = seed_label self.network_code = network_code if grid_type not in self.__valid_grid_type__: raise ValueError(f'{grid_type} is not a valid grid type') self.grid_type = grid_type super().__init__(data_or_dims, origin, spacing, phase=phase, value=value, grid_type=grid_type, grid_units=grid_units, float_type=float_type, model_id=model_id) def __repr__(self): line = f'Travel Time Grid\n' \ f' origin : {self.origin}\n' \ f' spacing : {self.spacing}\n' \ f' dimensions : {self.shape}\n' \ f' seed label : {self.seed_label}\n' \ f' seed location : {self.seed}' return line @classmethod def get_base_name(cls, network_code, phase, seed_label, grid_type): validate_phase(phase) if grid_type not in cls.__valid_grid_type__: raise ValueError(f'{grid_type} is not a valid grid type') base_name = f'{network_code}.{phase}.{seed_label}.' 
\ f'{grid_type.lower()}' return base_name @property def base_name(self): base_name = self.get_base_name(self.network_code, self.phase, self.seed_label, self.grid_type) return base_name def write(self, path='.'): base_name = self.base_name self._write_grid_data(base_name, path=path) self._write_grid_header(base_name, path=path, seed=self.seed, seed_label=self.seed_label, seed_units=self.grid_units) self._write_grid_model_id(base_name, path=path) class TTGrid(SeededGrid): def __init__(self, network_code, data_or_dims, origin, spacing, seed, seed_label, phase='P', value=0, float_type="FLOAT", model_id=None, grid_units='METER'): super().__init__(network_code, data_or_dims, origin, spacing, seed, seed_label, phase=phase, value=value, grid_type='TIME', float_type=float_type, model_id=model_id, grid_units=grid_units) def to_azimuth(self): """ This function calculate the take off angle and azimuth for every grid point given a travel time grid calculated using an Eikonal solver :return: azimuth and takeoff angles grids .. Note: The convention for the takeoff angle is that 0 degree is down. """ gds_tmp = np.gradient(self.data) gds = [-gd for gd in gds_tmp] azimuth = np.arctan2(gds[0], gds[1]) * 180 / np.pi # azimuth is zero northwards return AngleGrid(self.network_code, azimuth, self.origin, self.spacing, self.seed, self.seed_label, 'AZIMUTH', phase=self.phase, float_type=self.float_type, model_id=self.model_id, grid_units=self.grid_units) def to_takeoff(self): gds_tmp = np.gradient(self.data) gds = [-gd for gd in gds_tmp] hor = np.sqrt(gds[0] ** 2 + gds[1] ** 2) takeoff = np.arctan2(hor, -gds[2]) * 180 / np.pi # takeoff is zero pointing down return AngleGrid(self.network_code, takeoff, self.origin, self.spacing, self.seed, self.seed_label, 'TAKEOFF', phase=self.phase, float_type=self.float_type, model_id=self.model_id, grid_units=self.grid_units) def to_azimuth_point(self, coord, grid_space=False, mode='nearest', order=1, **kwargs): """ calculate the azimuth angle at a particular point on the grid for a given seed location :param coord: coordinates at which to calculate the takeoff angle :param grid_space: true if the coordinates are expressed in grid space (indices can be fractional) as opposed to model space (x, y, z) :param mode: interpolation mode :param order: interpolation order :return: takeoff angle at the location coord """ return self.to_azimuth().interpolate(coord, grid_space=grid_space, mode=mode, order=order, **kwargs)[0] def to_takeoff_point(self, coord, grid_space=False, mode='nearest', order=1, **kwargs): """ calculate the takeoff angle at a particular point on the grid for a given seed location :param coord: coordinates at which to calculate the takeoff angle :param grid_space: true if the coordinates are expressed in grid space (indices can be fractional) as opposed to model space (x, y, z) :param mode: interpolation mode :param order: interpolation order :return: takeoff angle at the location coord """ return self.to_takeoff().interpolate(coord, grid_space=grid_space, mode=mode, order=order, **kwargs)[0] def ray_tracer(self, start, grid_space=False, max_iter=1000, arrival_id=None): """ This function calculates the ray between a starting point (start) and an end point, which should be the seed of the travel_time grid, using the gradient descent method. 
:param start: the starting point (usually event location) :type start: tuple, list or numpy.array :param grid_space: true if the coordinates are expressed in grid space (indices can be fractional) as opposed to model space (x, y, z) :param max_iter: maximum number of iterations :param arrival_id: id of the arrival associated to the ray if applicable :rtype: numpy.array """ return ray_tracer(self, start, grid_space=grid_space, max_iter=max_iter, arrival_id=arrival_id, earth_model_id=self.model_id, network=self.network_code) @classmethod def from_velocity(cls, seed, seed_label, velocity_grid): return velocity_grid.to_time(seed, seed_label) def write(self, path='.'): return super().write(path=path) @property def site(self): return self.seed_label class TravelTimeEnsemble: def __init__(self, travel_time_grids): """ Combine a list of travel time grids together providing meta functionality (multi-threaded ray tracing, sorting, travel-time calculation for a specific location etc.). It is assumed that all grids are compatible, i.e., that all the grids have the same origin, spacing and dimensions. :param travel_time_grids: a list of TTGrid objects """ self.travel_time_grids = travel_time_grids self.__i__ = 0 for tt_grid in self.travel_time_grids: if not tt_grid.check_compatibility(travel_time_grids[0]): raise AssertionError('grids are not all compatible') def __len__(self): return len(self.travel_time_grids) def __add__(self, other): for travel_time_grid in other.travel_time_grids: self.travel_time_grids.append(travel_time_grid) return TravelTimeEnsemble(self.travel_time_grids) def __iter__(self): self.__i__ = 0 return self def __next__(self): if self.__i__ < len(self): result = self.travel_time_grids[self.__i__] self.__i__ += 1 return result else: raise StopIteration def __getitem__(self, item): if isinstance(item, int): return self.travel_time_grids[item] if isinstance(item, str): for travel_time_grid in self.travel_time_grids: if travel_time_grid.seed_label == item: return travel_time_grid raise KeyError(f'{item} not found') def __repr__(self): line = f'Number of travel time grids: {len(self)}' return line @classmethod def from_files(cls, path): """ create a travel time ensemble from files located in a directory :param path: the base path to the directory containing the travel time files. :return: """ tt_grids = [] for fle in Path(path).glob('*time*.hdr'): base_name = '.'.join(fle.name.split('.')[:-1]) fname = str(fle.parent / base_name) tt_grid = read_grid(fname, format='NLLOC', float_type=__default_float_type__) tt_grids.append(tt_grid) return cls(tt_grids) def select(self, seed_labels: Optional[list] = None, phase: Optional[list] = None): """ return a list of grids corresponding to seed_labels. :param seed_labels: seed labels of the travel time grids to return :param phase: the phase {'P' or 'S'}, both if None. 
:return: a list of travel time grids :rtype: TravelTimeEnsemble """ if (seed_labels is None) and (phase is None): return self if seed_labels is None: seed_labels = np.unique(self.seed_labels) if phase is None: phase = ['P', 'S'] returned_grids = [] for travel_time_grid in self.travel_time_grids: if travel_time_grid.seed_label in seed_labels: if travel_time_grid.phase in phase: returned_grids.append(travel_time_grid) return TravelTimeEnsemble(returned_grids) def sort(self, ascending: bool = True): """ sort the travel time grids by seed_label :param ascending: if true the grids are sorted in ascending order :type ascending: bool :return: sorted travel time grids. :rtype: TravelTimeEnsemble """ i = np.argsort([grid.seed_label for grid in self.travel_time_grids]) if not ascending: i = i[::-1] sorted_tt_grids = np.array(self.travel_time_grids)[i] return TravelTimeEnsemble(sorted_tt_grids) def travel_time(self, seed, grid_space: bool = False, seed_labels: Optional[list] = None, phase: Optional[list] = None): """ calculate the travel time at a specific point for a series of site ids :param seed: travel time seed :param grid_space: true if the coordinates are expressed in grid space (indices can be fractional) as opposed to model space (x, y, z) :param seed_labels: a list of sites from which to calculate the travel time. :param phase: a list of phases for which the travel time needs to be calculated :return: a dictionary of travel times keyed by phase and site id """ if isinstance(seed, list): seed = np.array(seed) if grid_space: seed = self.travel_time_grids[0].transform_from(seed) if not self.travel_time_grids[0].in_grid(seed): raise ValueError('seed is outside the grid') tt_grids = self.select(seed_labels=seed_labels, phase=phase) tts = [] labels = [] phases = [] for tt_grid in tt_grids: labels.append(tt_grid.seed_label) tts.append(tt_grid.interpolate(seed.T, grid_space=False)[0]) phases.append(tt_grid.phase) tts_dict = {} for phase in np.unique(phases): tts_dict[phase] = {} for label, tt, phase in zip(labels, tts, phases): tts_dict[phase][label] = tt return tts_dict def angles(self, seed, grid_space: bool = False, seed_labels: Optional[list] = None, phase: Optional[list] = None, **kwargs): """ calculate the azimuth and takeoff angles at a specific point for a series of site ids :param seed: travel time seed :param grid_space: true if the coordinates are expressed in grid space (indices can be fractional) as opposed to model space (x, y, z) :param seed_labels: a list of sites from which to calculate the travel time. 
:param phase: a list of phases for which the travel time need to be calculated :return: a list of dictionary containing the azimuth and site id """ if isinstance(seed, list): seed = np.array(seed) if grid_space: seed = self.travel_time_grids[0].transform_from(seed) if not self.travel_time_grids[0].in_grid(seed): raise ValueError('seed is outside the grid') tt_grids = self.select(seed_labels=seed_labels, phase=phase) azimuths = [] takeoffs = [] labels = [] phases = [] for tt_grid in tt_grids: labels.append(tt_grid.seed_label) azimuths.append(tt_grid.to_azimuth_point(seed.T, grid_space=False, **kwargs)) takeoffs.append(tt_grid.to_takeoff_point(seed.T, grid_space=False, **kwargs)) phases.append(tt_grid.phase) azimuth_dict = {} takeoff_dict = {} for phase in np.unique(phases): azimuth_dict[phase] = {} takeoff_dict[phase] = {} for label, azimuth, takeoff, phase in zip(labels, azimuths, takeoffs, phases): takeoff_dict[phase][label] = takeoff azimuth_dict[phase][label] = azimuth angle_dict = {} angle_dict['takeoff'] = takeoff_dict angle_dict['azimuth'] = azimuth_dict return angle_dict def ray_tracer(self, start, seed_labels=None, multithreading=False, cpu_utilisation=0.9, grid_space=False, max_iter=1000): """ :param start: origin of the ray, usually the location of an event :param seed_labels: a list of seed labels :param grid_space: true if the coordinates are expressed in grid space (indices can be fractional) as opposed to model space (x, y, z) :param multithreading: if True use multithreading :param max_iter: maximum number of iteration :param cpu_utilisation: fraction of core to use, between 0 and 1. The number of core to be use is bound between 1 and the total number of cores :return: a list of ray :rtype: list """ travel_time_grids = self.select(seed_labels=seed_labels) kwargs = {'grid_space': grid_space, 'max_iter': max_iter} if multithreading: ray_tracer_func = partial(ray_tracer, **kwargs) num_threads = int(np.ceil(cpu_utilisation * __cpu_count__)) # ensuring that the number of threads is comprised between 1 and # __cpu_count__ num_threads = np.max([np.min([num_threads, __cpu_count__]), 1]) data = [] for travel_time_grid in travel_time_grids: data.append((travel_time_grid, start)) with Pool(num_threads) as pool: results = pool.starmap(ray_tracer_func, data) for result in results: result.network = self.travel_time_grids[0].network_code else: results = [] for travel_time_grid in travel_time_grids: results.append(travel_time_grid.ray_tracer(start, **kwargs)) return results @property def seeds(self): seeds = [] for seed_label in self.seed_labels: seeds.append(self.select(seed_labels=seed_label)[0].seed) return np.array(seeds) @property def seed_labels(self): seed_labels = [] for grid in self.travel_time_grids: seed_labels.append(grid.seed_label) return np.unique(np.array(seed_labels)) @property def shape(self): return self.travel_time_grids[0].shape @property def origin(self): return self.travel_time_grids[0].origin @property def spacing(self): return self.travel_time_grids[0].spacing def write(self, path='.'): for travel_time_grid in self.travel_time_grids: travel_time_grid.write(path=path) def write_hdf5(self, file_name): write_hdf5(file_name, self) def to_hdf5(self, file_name): self.write_hdf5(file_name) return H5TTable(file_name) class AngleGrid(SeededGrid): def __init__(self, network_code, data_or_dims, origin, spacing, seed, seed_label, angle_type, phase='P', value=0, float_type="FLOAT", model_id=None, grid_units='degrees'): self.angle_type = angle_type super().__init__(network_code, 
data_or_dims, origin, spacing, seed, seed_label, phase=phase, value=value, grid_type='ANGLE', float_type=float_type, model_id=model_id, grid_units=grid_units) def write(self, path='.'): pass
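# ----------------------------------------------------------------------------
# Illustrative usage sketch (assumptions: a two-layer P-wave model, a made-up
# network code 'NT' and station label 'STA01'; skfmm must be installed). It
# shows the intended flow: layered model -> 3D velocity grid -> travel-time
# grid -> ray tracing.
#
# model = LayeredVelocityModel(phase='P')
# model.add_layer(ModelLayer(0, 5000))     # 5000 m/s starting at z = 0
# model.add_layer(ModelLayer(150, 5500))   # 5500 m/s starting at z = 150
# vel_grid = model.gen_3d_grid('NT', [60, 60, 30], [0, 0, 0], [10, 10, 10])
# tt_grid = vel_grid.to_time(np.array([300., 300., 150.]), 'STA01')
# ray = tt_grid.ray_tracer([100., 100., 50.])  # ray from a hypothetical event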
# -*- coding: utf-8 -*- # ------------------------------------------------------------------ # Filename: <filename> # Purpose: <purpose> # Author: <author> # Email: <email> # # Copyright (C) <copyright> # -------------------------------------------------------------------- """ :copyright: <copyright> :license: GNU Lesser General Public License, Version 3 (http://www.gnu.org/copyleft/lesser.html) """ import numpy as np import scipy.ndimage from .base import Grid from pathlib import Path from uuid import uuid4 import matplotlib.pyplot as plt from loguru import logger import skfmm from multiprocessing import Pool, cpu_count from functools import partial from typing import Optional import h5py from .base import ray_tracer import shutil from uquake.grid import read_grid from .hdf5 import H5TTable, write_hdf5 from scipy.interpolate import interp1d __cpu_count__ = cpu_count() valid_phases = ('P', 'S') valid_grid_types = ( 'VELOCITY', 'VELOCITY_METERS', 'SLOWNESS', 'VEL2', 'SLOW2', 'SLOW2_METERS', 'SLOW_LEN', 'STACK', 'TIME', 'TIME2D', 'PROB_DENSITY', 'MISFIT', 'ANGLE', 'ANGLE2D' ) valid_float_types = { # NLL_type: numpy_type 'FLOAT': 'float32', 'DOUBLE': 'float64' } valid_grid_units = ( 'METER', 'KILOMETER', ) __velocity_grid_location__ = Path('model') __time_grid_location__ = Path('time') __default_grid_units__ = 'METER' __default_float_type__ = 'FLOAT' def validate_phase(phase): if phase not in valid_phases: msg = f'phase should be one of the following valid phases:\n' for valid_phase in valid_phases: msg += f'{valid_phase}\n' raise ValueError(msg) return True def validate_grid_type(grid_type): if grid_type.upper() not in valid_grid_types: msg = f'grid_type = {grid_type} is not valid\n' \ f'grid_type should be one of the following valid grid ' \ f'types:\n' for valid_grid_type in valid_grid_types: msg += f'{valid_grid_type}\n' raise ValueError(msg) return True def validate_grid_units(grid_units): if grid_units.upper() not in valid_grid_units: msg = f'grid_units = {grid_units} is not valid\n' \ f'grid_units should be one of the following valid grid ' \ f'units:\n' for valid_grid_unit in valid_grid_units: msg += f'{valid_grid_unit}\n' raise ValueError(msg) return True def validate_float_type(float_type): if float_type.upper() not in valid_float_types.keys(): msg = f'float_type = {float_type} is not valid\n' \ f'float_type should be one of the following valid float ' \ f'types:\n' for valid_float_type in valid_float_types: msg += f'{valid_float_type}\n' raise ValueError(msg) return True def validate(value, choices): if value not in choices: msg = f'value should be one of the following choices\n:' for choice in choices: msg += f'{choice}\n' raise ValueError(msg) return True class Seeds: __valid_measurement_units__ = ['METERS', 'KILOMETERS'] def __init__(self, sites=[], units='METERS'): """ specifies a series of source location from an inventory object :param sites: a list of sites containing at least the location, and site label :type sites: list of dictionary :Example: >>> site = {'label': 'test', 'x': 1000, 'y': 1000, 'z': 1000, 'elev': 0.0} >>> sites = [site] >>> seeds = Seeds(sites) """ validate(units, self.__valid_measurement_units__) self.units = units self.sites = sites @classmethod def from_inventory(cls, inventory): """ create from an inventory object :param inventory: :type inventory: uquake.core.inventory.Inventory """ srces = [] for site in inventory.sites: srce = {'label': site.code, 'x': site.x, 'y': site.y, 'z': site.z, 'elev': 0} srces.append(srce) return cls(srces) @classmethod def 
from_json(cls, json): pass def add(self, label, x, y, z, elev=0, units='METERS'): """ Add a single site to the source list :param label: site label :type label: str :param x: x location relative to geographic origin expressed in the units of measurements for site/source :type x: float :param y: y location relative to geographic origin expressed in the units of measurements for site/source :type y: float :param z: z location relative to geographic origin expressed in the units of measurements for site/source :type z: float :param elev: elevation above z grid position (positive UP) in kilometers for site (Default = 0) :type elev: float :param units: units of measurement used to express x, y, and z ( 'METERS' or 'KILOMETERS') """ validate(units.upper(), self.__valid_measurement_units__) self.sites.append({'label': label, 'x': x, 'y': y, 'z': z, 'elev': elev}) self.units = units.upper() @classmethod def generate_random_seeds_in_grid(cls, grid, n_seeds=1): """ generate n_seeds random seeds inside the grid provided. This function is mainly used for testing purposes :param grid: a grid :type grid: uquake.grid.base.Grid or an object inheriting from Grid :param n_seeds: number of seeds to generate :return: a list of seeds >>> from uquake.grid.base import Grid >>> from uquake.grid.nlloc import Seeds >>> grid_dimensions = [10, 10, 10] >>> grid_spacing = [1, 1, 1] >>> grid_origin = [0, 0, 0] >>> grid = Grid(grid_dimensions, grid_spacing, grid_origin, value=1) >>> seeds = Seeds.generate_random_seeds_in_grid(grid, n_seeds=10) """ seeds = cls.__init__() label_root = 'seed' for i, point in enumerate(grid.generate_random_points_in_grid( n_points=n_seeds)): label = f'{label_root}_{i}' seeds.add(label, point[0], point[1], point[2]) return seeds def __repr__(self): line = "" for site in self.sites: # test if site name is shorter than 6 characters line += f'GTSRCE {site["label"]} XYZ ' \ f'{site["x"] / 1000:>15.6f} ' \ f'{site["y"] / 1000:>15.6f} ' \ f'{site["z"] / 1000:>15.6f} ' \ f'0.00\n' return line @property def locs(self): seeds = [] for site in self.sites: seeds.append([site['x'], site['y'], site['z']]) return np.array(seeds) @property def labels(self): seed_labels = [] for site in self.sites: seed_labels.append(site['label']) return np.array(seed_labels) # class Srces(Seeds): # def __init__(self, sites=[], units='METERS'): # super().__init__(sites=sites, units=units) class NLLocGrid(Grid): """ base 3D rectilinear grid object """ def __init__(self, data_or_dims, origin, spacing, phase, value=0, grid_type='VELOCITY_METERS', grid_units=__default_grid_units__, float_type="FLOAT", model_id=None): """ :param data_or_dims: data or data dimensions. 
If dimensions are provided the a homogeneous gris is created with value=value :param origin: origin of the grid :type origin: list :param spacing: the spacing between grid nodes :type spacing: list :param phase: the uquake phase (value 'P' or 'S') :type phase: str :param value: :type value: float :param grid_type: :type grid_type: str :param grid_units: :type grid_units: str :param float_type: :type float_type: str :param model_id: :type model_id: str """ super().__init__(data_or_dims, spacing=spacing, origin=origin, value=value, resource_id=model_id) if validate_phase(phase): self.phase = phase.upper() if validate_grid_type(grid_type): self.grid_type = grid_type.upper() self.extensions = ['.buf', '.mid', '.hdr'] if validate_grid_units(grid_units): self.grid_units = grid_units.upper() if validate_float_type(float_type): self.float_type = float_type.upper() def _write_grid_data(self, base_name, path='.'): Path(path).mkdir(parents=True, exist_ok=True) with open(Path(path) / (base_name + '.buf'), 'wb') \ as out_file: if self.float_type == 'FLOAT': out_file.write(self.data.astype(np.float32).tobytes()) elif self.float_type == 'DOUBLE': out_file.write(self.data.astype(np.float64).tobytes()) def _write_grid_header(self, base_name, path='.', seed_label=None, seed=None, seed_units=None): # convert 'METER' to 'KILOMETER' if self.grid_units == 'METER': origin = self.origin / 1000 spacing = self.spacing / 1000 else: origin = self.origin spacing = self.spacing line1 = f'{self.shape[0]:d} {self.shape[1]:d} {self.shape[2]:d} ' \ f'{origin[0]:f} {origin[1]:f} {origin[2]:f} ' \ f'{spacing[0]:f} {spacing[1]:f} {spacing[2]:f} ' \ f'{self.grid_type}\n' with open(Path(path) / (base_name + '.hdr'), 'w') as out_file: out_file.write(line1) if self.grid_type in ['TIME', 'ANGLE']: if seed_units is None: logger.warning(f'seed_units are not defined. 
' f'Assuming same units as grid (' f'{self.grid_units}') if self.grid_units == 'METER': seed = seed / 1000 line2 = u"%s %f %f %f\n" % (seed_label, seed[0], seed[1], seed[2]) out_file.write(line2) out_file.write(u'TRANSFORM NONE\n') return True def _write_grid_model_id(self, base_name, path='.'): with open(Path(path) / (base_name + '.mid'), 'w') as out_file: out_file.write(f'{self.model_id}') return True def write(self, base_name, path='.'): self._write_grid_data(base_name, path=path) self._write_grid_header(base_name, path=path) self._write_grid_model_id(base_name, path=path) return True def mv(self, base_name, origin, destination): """ move a NLLoc grid with a certain base_name from an origin to a destination :param NLLocGridObject: :type NLLocGridObject: uquake.grid.nlloc.NLLocGrid :param base_name: :type base_name: str :param origin: :type origin: str :param destination: :type destination: str :return: """ self.write(base_name, destination) for ext in self.extensions: shutil.move(f'{origin}/{base_name}.{ext}', f'{destination}/{base_name}.{ext}') @property def model_id(self): return self.resource_id class ModelLayer: """ 1D model varying in Z """ def __init__(self, z_top, value_top): """ :param z_top: Top of the layer z coordinates :param value_top: Value at the top of the layer """ self.z_top = z_top self.value_top = value_top def __repr__(self): return f'top - {self.z_top:5d} | value - {self.value_top:5d}\n' class LayeredVelocityModel(object): def __init__(self, model_id=None, velocity_model_layers=None, phase='P', grid_units='METER', float_type=__default_float_type__, gradient=False): """ Initialize :param model_id: model id, if not set the model ID is set using UUID :type model_id: str :param velocity_model_layers: a list of VelocityModelLayer :type velocity_model_layers: list :param phase: Phase either 'P' or 'S' :type phase: str """ if velocity_model_layers is None: self.velocity_model_layers = [] if validate_phase(phase): self.phase = phase.upper() if validate_grid_units(grid_units): self.grid_units = grid_units.upper() if validate_float_type(float_type): self.float_type = float_type.upper() self.grid_type = 'VELOCITY' if model_id is None: model_id = str(uuid4()) self.model_id = model_id self.gradient = gradient def __repr__(self): output = '' for i, layer in enumerate(self.velocity_model_layers): output += f'layer {i + 1:4d} | {layer}' return output def add_layer(self, layer): """ Add a layer to the model. 
The layers must be added in sequence from the top to the bottom :param layer: a LayeredModel object """ if not (type(layer) is ModelLayer): raise TypeError('layer must be a VelocityModelLayer object') if self.velocity_model_layers is None: self.velocity_model_layers = [layer] else: self.velocity_model_layers.append(layer) def gen_1d_model(self, z_min, z_max, spacing): # sort the layers to ensure the layers are properly ordered z = [] v = [] for layer in self.velocity_model_layers: z.append(layer.z_top) v.append(layer.value_top) if np.max(z) < z_max: i_z_max = np.argmax(z) v_z_max = v[i_z_max] z.append(z_max) v.append(v_z_max) if np.min(z) > z_min: i_z_min = np.argmin(z) v_z_min = v[i_z_min] z.append(z_min) v.append(v_z_min) i_sort = np.argsort(z) z = np.array(z) v = np.array(v) z = z[i_sort] v = v[i_sort] z_interp = np.arange(z_min, z_max, spacing[2]) kind = 'previous' if self.gradient: kind = 'linear' f_interp = interp1d(z, v, kind=kind) v_interp = f_interp(z_interp) return z_interp, v_interp def gen_3d_grid(self, network_code, dims, origin, spacing): model_grid_3d = VelocityGrid3D.from_layered_model(self, network_code, dims, origin, spacing) return model_grid_3d def plot(self, z_min, z_max, spacing, *args, **kwargs): """ Plot the 1D velocity model :param z_min: lower limit of the model :param z_max: upper limit of the model :param spacing: plotting resolution in z :return: matplotlib axis """ z_interp, v_interp = self.gen_1d_model(z_min, z_max, spacing) x_label = None if self.phase == 'P': x_label = 'P-wave velocity' elif self.phase == 'S': x_label = 's-wave velocity' if self.grid_units == 'METER': units = 'm' else: units = 'km' y_label = f'z [{units}]' ax = plt.axes() ax.plot(v_interp, z_interp, *args, **kwargs) plt.xlabel(x_label) plt.ylabel(y_label) ax.set_aspect(2) plt.tight_layout() return ax class VelocityGrid3D(NLLocGrid): def __init__(self, network_code, data_or_dims, origin, spacing, phase='P', value=0, float_type=__default_float_type__, model_id=None, **kwargs): self.network_code = network_code if (type(spacing) is int) | (type(spacing) is float): spacing = [spacing, spacing, spacing] super().__init__(data_or_dims, origin, spacing, phase, value=value, grid_type='VELOCITY_METERS', grid_units='METER', float_type=float_type, model_id=model_id) @staticmethod def get_base_name(network_code, phase): """ return the base name given a network code and a phase :param network_code: Code of the network :type network_code: str :param phase: Phase, either P or S :type phase: str either 'P' or 'S' :return: the base name """ validate_phase(phase) return f'{network_code.upper()}.{phase.upper()}.mod' @classmethod def from_ocd(cls, origin, corner, dimensions, val=0): pass @classmethod def from_ocs(cls, origin, corner, spacing, val=0): pass @classmethod def from_ocd(cls, origin, dimensions, spacing, val=0): pass @classmethod def from_layered_model(cls, layered_model, network_code, dims, origin, spacing, **kwargs): """ Generating a 3D grid model from :param network_code: :param layered_model: :param dims: :param origin: :param spacing: :param kwargs: :return: """ z_min = origin[-1] z_max = z_min + spacing[-1] * dims[-1] z_interp, v_interp = layered_model.gen_1d_model(z_min, z_max, spacing) data = np.zeros(dims) for i, v in enumerate(v_interp): data[:, :, i] = v_interp[i] return cls(network_code, data, origin, spacing, phase=layered_model.phase, float_type=layered_model.float_type, model_id=layered_model.model_id, **kwargs) def to_slow_lens(self): data = self.spacing[0] / self.data return 
NLLocGrid(data, self.origin, self.spacing, self.phase, grid_type='SLOW_LEN', grid_units=self.grid_units, float_type=self.float_type, model_id=self.model_id) @classmethod def from_slow_len(cls, grid: NLLocGrid, network_code: str): data = np.mean(grid.spacing) / grid.data return cls(network_code, data, grid.origin, grid.spacing, phase=grid.phase, float_type=grid.float_type, model_id=grid.model_id) def to_time(self, seed, seed_label, sub_grid_resolution=0.1, *args, **kwargs): """ Eikonal solver based on scikit fast marching solver :param seed: numpy array location of the seed or origin of useis wave in model coordinates (usually location of a station or an event) :type seed: numpy.array or list :param seed_label: seed label (name of station) :type seed_label: basestring :param sub_grid_resolution: resolution of the grid around the seed. Propagating the wavefront on a denser grid around the seed, significantly improves the travel time accuracy. The value represents a fraction of the grid resolution. For instance, assuming a grid with spacing of 10m, if the sub_grid_resolution is set to 0.1, the resolution around the grid will be 1m. :rtype: TTGrid """ if isinstance(seed, list): seed = np.array(seed) if not self.in_grid(seed): logger.warning(f'{seed_label} is outside the grid. ' f'The travel time grid will not be calculated') return origin = self.origin shape = self.shape spacing = self.spacing sub_grid_spacing = spacing * sub_grid_resolution # extent = ((4 * sub_grid_spacing) * 1.2 + sub_grid_spacing) n_pts_inner_grid = (4 * spacing / sub_grid_spacing * 1.2).astype(int) for i in range(0, len(n_pts_inner_grid)): if n_pts_inner_grid[i] % 2: n_pts_inner_grid[i] += 1 x_i = np.arange(0, n_pts_inner_grid[0]) * sub_grid_spacing[0] y_i = np.arange(0, n_pts_inner_grid[1]) * sub_grid_spacing[1] z_i = np.arange(0, n_pts_inner_grid[2]) * sub_grid_spacing[2] x_i = x_i - np.mean(x_i) + seed[0] y_i = y_i - np.mean(y_i) + seed[1] z_i = z_i - np.mean(z_i) + seed[2] X_i, Y_i, Z_i = np.meshgrid(x_i, y_i, z_i, indexing='ij') coords = np.array([X_i.ravel(), Y_i.ravel(), Z_i.ravel()]).T vel = self.interpolate(coords, grid_space=False).reshape( X_i.shape) phi = np.ones_like(X_i) phi[int(np.floor(len(x_i) / 2)), int(np.floor(len(y_i) / 2)), int(np.floor(len(z_i) / 2))] = 0 tt_tmp = skfmm.travel_time(phi, vel, dx=sub_grid_spacing) tt_tmp_grid = TTGrid(self.network_code, tt_tmp, [x_i[0], y_i[0], z_i[0]], sub_grid_spacing, seed, seed_label, phase=self.phase, float_type=self.float_type, model_id=self.model_id, grid_units=self.grid_units) data = self.data xe = origin[0] + np.arange(0, shape[0], 1) * spacing[0] ye = origin[1] + np.arange(0, shape[1], 1) * spacing[1] ze = origin[2] + np.arange(0, shape[2], 1) * spacing[2] Xe, Ye, Ze = np.meshgrid(xe, ye, ze, indexing='ij') coords = np.array([Xe.ravel(), Ye.ravel(), Ze.ravel()]) corner1 = np.array([np.min(x_i), np.min(y_i), np.min(z_i)]) corner2 = np.array([np.max(x_i), np.max(y_i), np.max(z_i)]) test = ((coords[0, :] >= corner1[0]) & (coords[0, :] <= corner2[0]) & (coords[1, :] >= corner1[1]) & (coords[1, :] <= corner2[1]) & (coords[2, :] >= corner1[2]) & (coords[2, :] <= corner2[2])) Xe_grid = Xe.ravel()[test] Ye_grid = Ye.ravel()[test] Ze_grid = Ze.ravel()[test] X = np.array([Xe_grid, Ye_grid, Ze_grid]).T tt_interp = tt_tmp_grid.interpolate(X, grid_space=False, order=3)[0] bias = np.max(tt_interp) phi_out = np.ones_like(Xe).ravel() phi_out[test] = tt_interp - bias phi_out = phi_out.reshape(Xe.shape) tt_out = skfmm.travel_time(phi_out, data, dx=spacing) # tt_out = 
tt_out.ravel() + bias
        tt_out = tt_out.ravel() + bias
        tt_out[test] = tt_interp
        tt_out = tt_out.reshape(Xe.shape)

        tt_out_grid = TTGrid(self.network_code, tt_out, self.origin,
                             self.spacing, seed, seed_label,
                             phase=self.phase,
                             float_type=self.float_type,
                             model_id=self.model_id,
                             grid_units=self.grid_units)

        tt_out_grid.data -= tt_out_grid.interpolate(seed.T,
                                                    grid_space=False,
                                                    order=3)[0]

        return tt_out_grid

    def to_time_multi_threaded(self, seeds, seed_labels,
                               cpu_utilisation=0.9, *args, **kwargs):
        """
        Multi-threaded version of the Eikonal solver based on the
        scikit fast marching solver
        :param seeds: array of seeds
        :type seeds: np.array
        :param seed_labels: array of seed labels
        :type seed_labels: np.array
        :param cpu_utilisation: fraction of the cpu cores to be used for
        the processing task (between 0 and 1)
        :type cpu_utilisation: float between 0 and 1
        :param args: arguments to be passed directly to the
        skfmm.travel_time function
        :param kwargs: keyword arguments to be passed directly to the
        skfmm.travel_time function
        :return: a travel time grid ensemble
        :rtype: TravelTimeEnsemble
        """

        num_threads = int(np.ceil(cpu_utilisation * __cpu_count__))
        # ensuring that the number of threads is comprised between 1 and
        # __cpu_count__
        num_threads = np.max([np.min([num_threads, __cpu_count__]), 1])

        data = []
        for seed, seed_label in zip(seeds, seed_labels):
            if not self.in_grid(seed):
                logger.warning(f'{seed_label} is outside the grid. '
                               f'The travel time grid will not be '
                               f'calculated')
                continue
            data.append((seed, seed_label))

        with Pool(num_threads) as pool:
            results = pool.starmap(self.to_time, data)

        tt_grid_ensemble = TravelTimeEnsemble(results)

        return tt_grid_ensemble

    def write(self, path='.'):
        base_name = self.base_name
        super().write(base_name, path=path)

    def mv(self, origin, destination):
        """
        move the velocity grid files from {origin} to {destination}
        :param origin: origin
        :param destination: destination
        :return:
        """
        # super().mv is a bound method; the previous code passed `self`
        # a second time as the first argument
        super().mv(self.base_name, origin, destination)

    @property
    def base_name(self):
        return self.get_base_name(self.network_code, self.phase)


class VelocityGridEnsemble:
    def __init__(self, p_velocity_grid, s_velocity_grid):
        """
        :param p_velocity_grid: p-wave 3D velocity grid
        :type p_velocity_grid: VelocityGrid3D
        :param s_velocity_grid: s-wave 3D velocity grid
        :type s_velocity_grid: VelocityGrid3D
        """
        self.p_velocity_grid = p_velocity_grid
        self.s_velocity_grid = s_velocity_grid
        self.__i__ = 0

    def __getitem__(self, item):
        if item.upper() == 'P':
            return self.p_velocity_grid
        elif item.upper() == 'S':
            return self.s_velocity_grid
        else:
            raise ValueError(f'{item} is not a valid key. '
                             f'The key value must either be "P" or "S"')

    def __iter__(self):
        self.__i__ = 0
        return self

    def __next__(self):
        # the original implementation compared the integer counter to
        # the strings '0' and '1' and never incremented it, so the
        # iterator silently yielded None forever
        if self.__i__ < 2:
            if self.__i__ == 0:
                grid = self.p_velocity_grid
            else:
                grid = self.s_velocity_grid
            self.__i__ += 1
            return grid
        else:
            raise StopIteration

    def keys(self):
        return ['P', 'S']

    def write(self, path='.'):
        for key in self.keys():
            self[key].write(path=path)

    def to_time_multi_threaded(self, seeds, seed_labels,
                               cpu_utilisation=0.9, *args, **kwargs):
        tt_grid_ensemble = TravelTimeEnsemble([])

        for key in self.keys():
            tt_grids = self[key].to_time_multi_threaded(
                seeds, seed_labels, cpu_utilisation=cpu_utilisation,
                *args, **kwargs)

            tt_grid_ensemble += tt_grids

        return tt_grid_ensemble

    def to_time(self, seeds, seed_labels, multi_threaded=False,
                sub_grid_resolution=0.1, *args, **kwargs):
        """
        Convert the velocity grids to travel-time grids
        :param seeds: a list of seeds, usually site locations
        :type seeds: numpy.array
        :param seed_labels: a list of seed labels, usually site codes
        :type seed_labels: list
        :param multi_threaded: if true, the travel-time grids are
        calculated using multithreading
        :param sub_grid_resolution: sub grid resolution for the near
        source solution, as a fraction of the grid resolution
        :param args:
        :param kwargs:
        :return: Travel time grid ensemble
        :rtype: TravelTimeEnsemble
        """
        if multi_threaded:
            return self.to_time_multi_threaded(
                seeds, seed_labels,
                sub_grid_resolution=sub_grid_resolution, *args, **kwargs)

        travel_time_grids = []
        for seed, seed_label in zip(seeds, seed_labels):
            for key in self.keys():
                tg = self[key].to_time(
                    seed, seed_label,
                    sub_grid_resolution=sub_grid_resolution,
                    *args, **kwargs)
                travel_time_grids.append(tg)

        return TravelTimeEnsemble(travel_time_grids)


class SeededGrid(NLLocGrid):
    """
    container for seeded grids (e.g., travel time, azimuth and take off
    angle)
    """
    __valid_grid_type__ = ['TIME', 'TIME2D', 'ANGLE', 'ANGLE2D']

    def __init__(self, network_code, data_or_dims, origin, spacing, seed,
                 seed_label, phase='P', value=0,
                 grid_units=__default_grid_units__,
                 grid_type='TIME', float_type="FLOAT", model_id=None):
        self.seed = seed
        self.seed_label = seed_label
        self.network_code = network_code

        if grid_type not in self.__valid_grid_type__:
            raise ValueError(f'{grid_type} is not a valid grid type')
        self.grid_type = grid_type

        # pass the validated grid_type through instead of hardcoding
        # 'TIME', so that ANGLE grids keep their type
        super().__init__(data_or_dims, origin, spacing, phase=phase,
                         value=value, grid_type=grid_type,
                         grid_units=grid_units, float_type=float_type,
                         model_id=model_id)

    def __repr__(self):
        line = f'Travel Time Grid\n' \
               f'    origin        : {self.origin}\n' \
               f'    spacing       : {self.spacing}\n' \
               f'    dimensions    : {self.shape}\n' \
               f'    seed label    : {self.seed_label}\n' \
               f'    seed location : {self.seed}'
        return line

    @classmethod
    def get_base_name(cls, network_code, phase, seed_label, grid_type):
        validate_phase(phase)

        if grid_type not in cls.__valid_grid_type__:
            raise ValueError(f'{grid_type} is not a valid grid type')

        base_name = f'{network_code}.{phase}.{seed_label}.'
\ f'{grid_type.lower()}' return base_name @property def base_name(self): base_name = self.get_base_name(self.network_code, self.phase, self.seed_label, self.grid_type) return base_name def write(self, path='.'): base_name = self.base_name self._write_grid_data(base_name, path=path) self._write_grid_header(base_name, path=path, seed=self.seed, seed_label=self.seed_label, seed_units=self.grid_units) self._write_grid_model_id(base_name, path=path) class TTGrid(SeededGrid): def __init__(self, network_code, data_or_dims, origin, spacing, seed, seed_label, phase='P', value=0, float_type="FLOAT", model_id=None, grid_units='METER'): super().__init__(network_code, data_or_dims, origin, spacing, seed, seed_label, phase=phase, value=value, grid_type='TIME', float_type=float_type, model_id=model_id, grid_units=grid_units) def to_azimuth(self): """ This function calculate the take off angle and azimuth for every grid point given a travel time grid calculated using an Eikonal solver :return: azimuth and takeoff angles grids .. Note: The convention for the takeoff angle is that 0 degree is down. """ gds_tmp = np.gradient(self.data) gds = [-gd for gd in gds_tmp] azimuth = np.arctan2(gds[0], gds[1]) * 180 / np.pi # azimuth is zero northwards return AngleGrid(self.network_code, azimuth, self.origin, self.spacing, self.seed, self.seed_label, 'AZIMUTH', phase=self.phase, float_type=self.float_type, model_id=self.model_id, grid_units=self.grid_units) def to_takeoff(self): gds_tmp = np.gradient(self.data) gds = [-gd for gd in gds_tmp] hor = np.sqrt(gds[0] ** 2 + gds[1] ** 2) takeoff = np.arctan2(hor, -gds[2]) * 180 / np.pi # takeoff is zero pointing down return AngleGrid(self.network_code, takeoff, self.origin, self.spacing, self.seed, self.seed_label, 'TAKEOFF', phase=self.phase, float_type=self.float_type, model_id=self.model_id, grid_units=self.grid_units) def to_azimuth_point(self, coord, grid_space=False, mode='nearest', order=1, **kwargs): """ calculate the azimuth angle at a particular point on the grid for a given seed location :param coord: coordinates at which to calculate the takeoff angle :param grid_space: true if the coordinates are expressed in grid space (indices can be fractional) as opposed to model space (x, y, z) :param mode: interpolation mode :param order: interpolation order :return: takeoff angle at the location coord """ return self.to_azimuth().interpolate(coord, grid_space=grid_space, mode=mode, order=order, **kwargs)[0] def to_takeoff_point(self, coord, grid_space=False, mode='nearest', order=1, **kwargs): """ calculate the takeoff angle at a particular point on the grid for a given seed location :param coord: coordinates at which to calculate the takeoff angle :param grid_space: true if the coordinates are expressed in grid space (indices can be fractional) as opposed to model space (x, y, z) :param mode: interpolation mode :param order: interpolation order :return: takeoff angle at the location coord """ return self.to_takeoff().interpolate(coord, grid_space=grid_space, mode=mode, order=order, **kwargs)[0] def ray_tracer(self, start, grid_space=False, max_iter=1000, arrival_id=None): """ This function calculates the ray between a starting point (start) and an end point, which should be the seed of the travel_time grid, using the gradient descent method. 
        :param start: the starting point (usually event location)
        :type start: tuple, list or numpy.array
        :param grid_space: true if the coordinates are expressed in
        grid space (indices can be fractional) as opposed to model space
        (x, y, z)
        :param max_iter: maximum number of iterations
        :param arrival_id: id of the arrival associated to the ray if
        applicable
        :rtype: numpy.array
        """
        return ray_tracer(self, start, grid_space=grid_space,
                          max_iter=max_iter, arrival_id=arrival_id,
                          earth_model_id=self.model_id,
                          network=self.network_code)

    @classmethod
    def from_velocity(cls, seed, seed_label, velocity_grid):
        return velocity_grid.to_time(seed, seed_label)

    def write(self, path='.'):
        return super().write(path=path)

    @property
    def site(self):
        return self.seed_label


class TravelTimeEnsemble:
    def __init__(self, travel_time_grids):
        """
        Combine a list of travel time grids together providing meta
        functionality (multi-threaded ray tracing, sorting, travel-time
        calculation for a specific location etc.). It is assumed that
        all grids are compatible, i.e., that all the grids have the same
        origin, spacing and dimensions.
        :param travel_time_grids: a list of TTGrid objects
        """
        self.travel_time_grids = travel_time_grids
        self.__i__ = 0

        for tt_grid in self.travel_time_grids:
            if not tt_grid.check_compatibility(travel_time_grids[0]):
                raise AssertionError('grids are not all compatible')

    def __len__(self):
        return len(self.travel_time_grids)

    def __add__(self, other):
        for travel_time_grid in other.travel_time_grids:
            self.travel_time_grids.append(travel_time_grid)

        return TravelTimeEnsemble(self.travel_time_grids)

    def __iter__(self):
        self.__i__ = 0
        return self

    def __next__(self):
        if self.__i__ < len(self):
            result = self.travel_time_grids[self.__i__]
            self.__i__ += 1
            return result
        else:
            raise StopIteration

    def __getitem__(self, item):
        if isinstance(item, int):
            return self.travel_time_grids[item]

        if isinstance(item, str):
            for travel_time_grid in self.travel_time_grids:
                if travel_time_grid.seed_label == item:
                    return travel_time_grid

        raise KeyError(f'{item} not found')

    def __repr__(self):
        line = f'Number of travel time grids: {len(self)}'
        return line

    @classmethod
    def from_files(cls, path):
        """
        create a travel time ensemble from files located in a directory
        :param path: the base path to the directory containing the
        travel time files.
        :return:
        """
        tt_grids = []
        for fle in Path(path).glob('*time*.hdr'):
            path = fle.parent
            base_name = '.'.join(fle.name.split('.')[:-1])
            fname = str(Path(path) / base_name)
            tt_grid = read_grid(fname, format='NLLOC',
                                float_type=__default_float_type__)
            tt_grids.append(tt_grid)

        return cls(tt_grids)

    def select(self, seed_labels: Optional[list] = None,
               phase: Optional[list] = None):
        """
        return a list of grids corresponding to seed_labels.
        :param seed_labels: seed labels of the travel time grids to
        return
        :param phase: the phase {'P' or 'S'}, both if None.
        :return: a list of travel time grids
        :rtype: TravelTimeEnsemble
        """

        if (seed_labels is None) and (phase is None):
            return self

        if seed_labels is None:
            seed_labels = np.unique(self.seed_labels)

        if phase is None:
            phase = ['P', 'S']

        returned_grids = []
        for travel_time_grid in self.travel_time_grids:
            if travel_time_grid.seed_label in seed_labels:
                if travel_time_grid.phase in phase:
                    returned_grids.append(travel_time_grid)

        return TravelTimeEnsemble(returned_grids)

    def sort(self, ascending: bool = True):
        """
        sorting the travel time grids by seed_label
        :param ascending: if true the grids are sorted in ascending
        order
        :type ascending: bool
        :return: sorted travel time grids
        :rtype: TravelTimeEnsemble
        """
        # argsort (not sort) is needed here: the result is used to
        # index into the list of grids
        i = np.argsort(self.seed_labels)

        if not ascending:
            i = i[-1::-1]

        sorted_tt_grids = np.array(self.travel_time_grids)[i]

        return TravelTimeEnsemble(sorted_tt_grids)

    def travel_time(self, seed, grid_space: bool = False,
                    seed_labels: Optional[list] = None,
                    phase: Optional[list] = None):
        """
        calculate the travel time at a specific point for a series of
        site ids
        :param seed: travel time seed
        :param grid_space: true if the coordinates are expressed in
        grid space (indices can be fractional) as opposed to model space
        (x, y, z)
        :param seed_labels: a list of sites from which to calculate the
        travel time.
        :param phase: a list of phases for which the travel time need
        to be calculated
        :return: a list of dictionary containing the travel time and
        site id
        """
        if isinstance(seed, list):
            seed = np.array(seed)

        if grid_space:
            seed = self.travel_time_grids[0].transform_from(seed)

        if not self.travel_time_grids[0].in_grid(seed):
            raise ValueError('seed is outside the grid')

        tt_grids = self.select(seed_labels=seed_labels, phase=phase)

        tts = []
        labels = []
        phases = []
        for tt_grid in tt_grids:
            labels.append(tt_grid.seed_label)
            tts.append(tt_grid.interpolate(seed.T, grid_space=False)[0])
            phases.append(tt_grid.phase)

        tts_dict = {}
        for phase in np.unique(phases):
            tts_dict[phase] = {}

        for label, tt, phase in zip(labels, tts, phases):
            tts_dict[phase][label] = tt

        return tts_dict

    def angles(self, seed, grid_space: bool = False,
               seed_labels: Optional[list] = None,
               phase: Optional[list] = None, **kwargs):
        """
        calculate the azimuth and takeoff angles at a specific point for
        a series of site ids
        :param seed: travel time seed
        :param grid_space: true if the coordinates are expressed in
        grid space (indices can be fractional) as opposed to model space
        (x, y, z)
        :param seed_labels: a list of sites from which to calculate the
        travel time.
:param phase: a list of phases for which the travel time need to be calculated :return: a list of dictionary containing the azimuth and site id """ if isinstance(seed, list): seed = np.array(seed) if grid_space: seed = self.travel_time_grids[0].transform_from(seed) if not self.travel_time_grids[0].in_grid(seed): raise ValueError('seed is outside the grid') tt_grids = self.select(seed_labels=seed_labels, phase=phase) azimuths = [] takeoffs = [] labels = [] phases = [] for tt_grid in tt_grids: labels.append(tt_grid.seed_label) azimuths.append(tt_grid.to_azimuth_point(seed.T, grid_space=False, **kwargs)) takeoffs.append(tt_grid.to_takeoff_point(seed.T, grid_space=False, **kwargs)) phases.append(tt_grid.phase) azimuth_dict = {} takeoff_dict = {} for phase in np.unique(phases): azimuth_dict[phase] = {} takeoff_dict[phase] = {} for label, azimuth, takeoff, phase in zip(labels, azimuths, takeoffs, phases): takeoff_dict[phase][label] = takeoff azimuth_dict[phase][label] = azimuth angle_dict = {} angle_dict['takeoff'] = takeoff_dict angle_dict['azimuth'] = azimuth_dict return angle_dict def ray_tracer(self, start, seed_labels=None, multithreading=False, cpu_utilisation=0.9, grid_space=False, max_iter=1000): """ :param start: origin of the ray, usually the location of an event :param seed_labels: a list of seed labels :param grid_space: true if the coordinates are expressed in grid space (indices can be fractional) as opposed to model space (x, y, z) :param multithreading: if True use multithreading :param max_iter: maximum number of iteration :param cpu_utilisation: fraction of core to use, between 0 and 1. The number of core to be use is bound between 1 and the total number of cores :return: a list of ray :rtype: list """ travel_time_grids = self.select(seed_labels=seed_labels) kwargs = {'grid_space': grid_space, 'max_iter': max_iter} if multithreading: ray_tracer_func = partial(ray_tracer, **kwargs) num_threads = int(np.ceil(cpu_utilisation * __cpu_count__)) # ensuring that the number of threads is comprised between 1 and # __cpu_count__ num_threads = np.max([np.min([num_threads, __cpu_count__]), 1]) data = [] for travel_time_grid in travel_time_grids: data.append((travel_time_grid, start)) with Pool(num_threads) as pool: results = pool.starmap(ray_tracer_func, data) for result in results: result.network = self.travel_time_grids[0].network_code else: results = [] for travel_time_grid in travel_time_grids: results.append(travel_time_grid.ray_tracer(start, **kwargs)) return results @property def seeds(self): seeds = [] for seed_label in self.seed_labels: seeds.append(self.select(seed_labels=seed_label)[0].seed) return np.array(seeds) @property def seed_labels(self): seed_labels = [] for grid in self.travel_time_grids: seed_labels.append(grid.seed_label) return np.unique(np.array(seed_labels)) @property def shape(self): return self.travel_time_grids[0].shape @property def origin(self): return self.travel_time_grids[0].origin @property def spacing(self): return self.travel_time_grids[0].spacing def write(self, path='.'): for travel_time_grid in self.travel_time_grids: travel_time_grid.write(path=path) def write_hdf5(self, file_name): write_hdf5(file_name, self) def to_hdf5(self, file_name): self.write_hdf5(file_name) return H5TTable(file_name) class AngleGrid(SeededGrid): def __init__(self, network_code, data_or_dims, origin, spacing, seed, seed_label, angle_type, phase='P', value=0, float_type="FLOAT", model_id=None, grid_units='degrees'): self.angle_type = angle_type super().__init__(network_code, 
data_or_dims, origin, spacing, seed, seed_label, phase=phase, value=value, grid_type='ANGLE', float_type=float_type, model_id=model_id, grid_units=grid_units) def write(self, path='.'): pass
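

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the library). The network code 'NT',
# the station label 'STA01', the seed coordinates and the homogeneous
# 5000 m/s velocity below are arbitrary example values. It exercises only
# the API defined above: build a VelocityGrid3D, derive a travel-time grid
# with the Eikonal solver, then trace a ray back toward the seed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    dims = [100, 100, 100]
    origin = [0., 0., 0.]
    spacing = [10., 10., 10.]

    # homogeneous P-wave velocity grid (data given directly as an array)
    vp = VelocityGrid3D('NT', np.ones(dims) * 5000., origin, spacing,
                        phase='P')

    # travel-time grid seeded at a (hypothetical) station location
    tt = vp.to_time(seed=[500., 500., 500.], seed_label='STA01')

    # ray traced from an arbitrary start point back to the seed
    ray = tt.ray_tracer([100., 200., 300.])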
import time import types import calendar import datetime import functools import synapse.exc as s_exc import synapse.common as s_common import synapse.lib.cli as s_cli import synapse.lib.cmd as s_cmd import synapse.lib.time as s_time import synapse.lib.parser as s_parser StatHelp = ''' Gives detailed information about a single cron job. Syntax: cron stat <iden prefix> Notes: Any prefix that matches exactly one valid cron job iden is accepted. ''' DelHelp = ''' Deletes a single cron job. Syntax: cron del|rm <iden prefix> Notes: Any prefix that matches exactly one valid cron job iden is accepted. ''' ListHelp = ''' List existing cron jobs in a cortex. Syntax: cron list|ls Example: user iden en? rpt? now? err? # start last start last end query root 029ce7bd.. Y Y N 17863 2019-06-11T21:47 2019-06-11T21:47 exec foo root 06b46533.. Y Y N 18140 2019-06-11T21:48 2019-06-11T21:48 exec bar ''' ModHelp = ''' Changes an existing cron job's query. Syntax: cron mod|edit <iden prefix> <new query> Notes: Any prefix that matches exactly one valid cron iden is accepted. ''' EnableHelp = ''' Enable an existing cron job. Syntax: cron enable <iden prefix> Notes: Any prefix that matches exactly one valid cron iden is accepted. ''' DisableHelp = ''' Disable an existing cron job. Syntax: cron disable <iden prefix> Notes: Any prefix that matches exactly one valid cron iden is accepted. ''' AddHelp = ''' Add a recurring cron job to a cortex. Syntax: cron add [optional arguments] {query} --minute, -M int[,int...][=] --hour, -H --day, -d --month, -m --year, -y *or:* [--hourly <min> | --daily <hour>:<min> | --monthly <day>:<hour>:<min> | --yearly <month>:<day>:<hour>:<min>] Notes: All times are interpreted as UTC. All arguments are interpreted as the job period, unless the value ends in an equals sign, in which case the argument is interpreted as the recurrence period. Only one recurrence period parameter may be specified. Currently, a fixed unit must not be larger than a specified recurrence period. i.e. '--hour 7 --minute +15' (every 15 minutes from 7-8am?) is not supported. Value values for fixed hours are 0-23 on a 24-hour clock where midnight is 0. If the --day parameter value does not start with in '+' and is an integer, it is interpreted as a fixed day of the month. A negative integer may be specified to count from the end of the month with -1 meaning the last day of the month. All fixed day values are clamped to valid days, so for example '-d 31' will run on February 28. If the fixed day parameter is a value in ([Mon, Tue, Wed, Thu, Fri, Sat, Sun] if locale is set to English) it is interpreted as a fixed day of the week. Otherwise, if the parameter value starts with a '+', then it is interpreted as an recurrence interval of that many days. If no plus-sign-starting parameter is specified, the recurrence period defaults to the unit larger than all the fixed parameters. e.g. '-M 5' means every hour at 5 minutes past, and -H 3, -M 1 means 3:01 every day. At least one optional parameter must be provided. All parameters accept multiple comma-separated values. If multiple parameters have multiple values, all combinations of those values are used. All fixed units not specified lower than the recurrence period default to the lowest valid value, e.g. -m +2 will be scheduled at 12:00am the first of every other month. One exception is the largest fixed value is day of the week, then the default period is set to be a week. A month period with a day of week fixed value is not currently supported. Fixed-value year (i.e. 
--year 2019) is not supported. See the 'at' command for one-time cron jobs. As an alternative to the above options, one may use exactly one of --hourly, --daily, --monthly, --yearly with a colon-separated list of fixed parameters for the value. It is an error to use both the individual options and these aliases at the same time. Examples: Run a query every last day of the month at 3 am cron add -H 3 -d-1 {#foo} Run a query every 8 hours cron add -H +8 {#foo} Run a query every Wednesday and Sunday at midnight and noon cron add -H 0,12 -d Wed,Sun {#foo} Run a query every other day at 3:57pm cron add -d +2 -M 57 -H 15 {#foo} ''' class Cron(s_cli.Cmd): ''' Manages cron jobs in a cortex. Cron jobs are rules persistently stored in a cortex such that storm queries automatically run on a time schedule. Cron jobs may be be recurring or one-time. Use the 'at' command to add one-time jobs. A subcommand is required. Use 'cron -h' for more detailed help. ''' _cmd_name = 'cron' _cmd_syntax = ( ('line', {'type': 'glob'}), # type: ignore ) async def _match_idens(self, core, prefix): ''' Returns the iden that starts with prefix. Prints out error and returns None if it doesn't match exactly one. ''' idens = [cron['iden'] for cron in await core.listCronJobs()] matches = [iden for iden in idens if iden.startswith(prefix)] if len(matches) == 1: return matches[0] elif len(matches) == 0: self.printf('Error: provided iden does not match any valid authorized cron job') else: self.printf('Error: provided iden matches more than one cron job') return None def _make_argparser(self): parser = s_cmd.Parser(prog='cron', outp=self, description=self.__doc__) subparsers = parser.add_subparsers(title='subcommands', required=True, dest='cmd', parser_class=functools.partial(s_cmd.Parser, outp=self)) subparsers.add_parser('list', aliases=['ls'], help="List cron jobs you're allowed to manipulate", usage=ListHelp) parser_add = subparsers.add_parser('add', help='add a cron job', usage=AddHelp) parser_add.add_argument('--minute', '-M') parser_add.add_argument('--hour', '-H') parser_add.add_argument('--day', '-d', help='day of week, day of month or number of days') parser_add.add_argument('--month', '-m') parser_add.add_argument('--year', '-y') group = parser_add.add_mutually_exclusive_group() group.add_argument('--hourly') group.add_argument('--daily') group.add_argument('--monthly') group.add_argument('--yearly') parser_add.add_argument('query', help='Storm query in curly braces') parser_del = subparsers.add_parser('del', aliases=['rm'], help='delete a cron job', usage=DelHelp) parser_del.add_argument('prefix', help='Cron job iden prefix') parser_stat = subparsers.add_parser('stat', help='details a cron job', usage=StatHelp) parser_stat.add_argument('prefix', help='Cron job iden prefix') parser_mod = subparsers.add_parser('mod', aliases=['edit'], help='change an existing cron job', usage=ModHelp) parser_mod.add_argument('prefix', help='Cron job iden prefix') parser_mod.add_argument('query', help='New Storm query in curly braces') parser_en = subparsers.add_parser('enable', help='enable an existing cron job', usage=EnableHelp) parser_en.add_argument('prefix', help='Cron job iden prefix') parser_dis = subparsers.add_parser('disable', help='disable an existing cron job', usage=DisableHelp) parser_dis.add_argument('prefix', help='Cron job iden prefix') return parser @staticmethod def _parse_weekday(val): ''' Try to match a day-of-week abbreviation, then try a day-of-week full name ''' val = val.title() try: return 
list(calendar.day_abbr).index(val) except ValueError: try: return list(calendar.day_name).index(val) except ValueError: return None @staticmethod def _parse_incval(incunit, incval): ''' Parse a non-day increment value. Should be an integer or a comma-separated integer list. ''' try: retn = [int(val) for val in incval.split(',')] except ValueError: return None return retn[0] if len(retn) == 1 else retn @staticmethod def _parse_req(requnit, reqval): ''' Parse a non-day fixed value ''' assert reqval[0] != '=' try: retn = [] for val in reqval.split(','): if requnit == 'month': if reqval[0].isdigit(): retn.append(int(reqval)) # must be a month (1-12) else: try: retn.append(list(calendar.month_abbr).index(val.title())) except ValueError: retn.append(list(calendar.month_name).index(val.title())) else: retn.append(int(val)) except ValueError: return None if not retn: return None return retn[0] if len(retn) == 1 else retn @staticmethod def _parse_day(optval): ''' Parse a --day argument ''' isreq = not optval.startswith('+') if not isreq: optval = optval[1:] try: retnval = [] unit = None for val in optval.split(','): if not val: raise ValueError if val[-1].isdigit(): newunit = 'dayofmonth' if isreq else 'day' if unit is None: unit = newunit elif newunit != unit: raise ValueError retnval.append(int(val)) else: newunit = 'dayofweek' if unit is None: unit = newunit elif newunit != unit: raise ValueError weekday = Cron._parse_weekday(val) if weekday is None: raise ValueError retnval.append(weekday) if len(retnval) == 0: raise ValueError except ValueError: return None, None if len(retnval) == 1: retnval = retnval[0] return unit, retnval def _parse_alias(self, opts): retn = types.SimpleNamespace() retn.query = opts.query if opts.hourly is not None: retn.hour = '+1' retn.minute = str(int(opts.hourly)) return retn if opts.daily is not None: fields = time.strptime(opts.daily, '%H:%M') retn.day = '+1' retn.hour = str(fields.tm_hour) retn.minute = str(fields.tm_min) return retn if opts.monthly is not None: day, rest = opts.monthly.split(':', 1) fields = time.strptime(rest, '%H:%M') retn.month = '+1' retn.day = day retn.hour = str(fields.tm_hour) retn.minute = str(fields.tm_min) return retn if opts.yearly is not None: fields = opts.yearly.split(':') if len(fields) != 4: raise ValueError(f'Failed to parse parameter {opts.yearly}') retn.year = '+1' retn.month, retn.day, retn.hour, retn.minute = fields return retn return None async def _handle_add(self, core, opts): incunit = None incval = None reqdict = {} valinfo = { # unit: (minval, next largest unit) 'month': (1, 'year'), 'dayofmonth': (1, 'month'), 'hour': (0, 'day'), 'minute': (0, 'hour'), } if not opts.query.startswith('{'): self.printf('Error: query parameter must start with {') return try: alias_opts = self._parse_alias(opts) except ValueError as e: self.printf(f'Error: Failed to parse ..ly parameter: {" ".join(e.args)}') return if alias_opts: if opts.year or opts.month or opts.day or opts.hour or opts.minute: self.printf('Error: may not use both alias (..ly) and explicit options at the same time') return opts = alias_opts for optname in ('year', 'month', 'day', 'hour', 'minute'): optval = getattr(opts, optname, None) if optval is None: if incunit is None and not reqdict: continue # The option isn't set, but a higher unit is. Go ahead and set the required part to the lowest valid # value, e.g. 
so -m 2 would run on the *first* of every other month at midnight if optname == 'day': reqdict['dayofmonth'] = 1 else: reqdict[optname] = valinfo[optname][0] continue isreq = not optval.startswith('+') if optname == 'day': unit, val = self._parse_day(optval) if val is None: self.printf(f'Error: failed to parse day value "{optval}"') return if unit == 'dayofweek': if incunit is not None: self.printf('Error: May not provide a recurrence value with day of week') return if reqdict: self.printf('Error: may not fix month or year with day of week') return incunit, incval = unit, val elif unit == 'day': incunit, incval = unit, val else: assert unit == 'dayofmonth' reqdict[unit] = val continue if not isreq: if incunit is not None: self.printf('Error: may not provide more than 1 recurrence parameter') return if reqdict: self.printf('Error: fixed unit may not be larger than recurrence unit') return incunit = optname incval = self._parse_incval(optname, optval) if incval is None: self.printf('Error: failed to parse parameter') return continue if optname == 'year': self.printf('Error: year may not be a fixed value') return reqval = self._parse_req(optname, optval) if reqval is None: self.printf(f'Error: failed to parse fixed parameter "{optval}"') return reqdict[optname] = reqval # If not set, default (incunit, incval) to (1, the next largest unit) if incunit is None: if not reqdict: self.printf('Error: must provide at least one optional argument') return requnit = next(iter(reqdict)) # the first key added is the biggest unit incunit = valinfo[requnit][1] incval = 1 # Remove the curly braces query = opts.query[1:-1] cdef = {'storm': query, 'reqs': reqdict, 'incunit': incunit, 'incvals': incval, } newcdef = await core.addCronJob(cdef) self.printf(f'Created cron job {newcdef["iden"]}') @staticmethod def _format_timestamp(ts): # N.B. normally better to use fromtimestamp with UTC timezone, but we don't want timezone to print out return datetime.datetime.utcfromtimestamp(ts).isoformat(timespec='minutes') async def _handle_list(self, core, opts): cronlist = await core.listCronJobs() if not cronlist: self.printf('No cron jobs found') return self.printf( f'{"user":10} {"iden":10} {"en?":3} {"rpt?":4} {"now?":4} {"err?":4} ' f'{"# start":7} {"last start":16} {"last end":16} {"query"}') for cron in cronlist: iden = cron.get('iden') idenf = iden[:8] + '..' 
user = cron.get('username') or '<None>' query = cron.get('query') or '<missing>' isrecur = 'Y' if cron.get('recur') else 'N' isrunning = 'Y' if cron.get('isrunning') else 'N' enabled = 'Y' if cron.get('enabled') else 'N' startcount = cron.get('startcount') or 0 laststart = cron.get('laststarttime') laststart = 'Never' if laststart is None else self._format_timestamp(laststart) lastend = cron.get('lastfinishtime') lastend = 'Never' if lastend is None else self._format_timestamp(lastend) result = cron.get('lastresult') iserr = 'X' if result is not None and not result.startswith('finished successfully') else ' ' self.printf( f'{user:10} {idenf:10} {enabled:3} {isrecur:4} {isrunning:4} {iserr:4} ' f'{startcount:7} {laststart:16} {lastend:16} {query}') async def _handle_mod(self, core, opts): prefix = opts.prefix query = opts.query if not query.startswith('{'): self.printf('Error: expected second argument to start with {') return # remove the curly braces query = query[1:-1] iden = await self._match_idens(core, prefix) if iden is None: return await core.updateCronJob(iden, query) self.printf(f'Modified cron job {iden}') async def _handle_enable(self, core, opts): prefix = opts.prefix iden = await self._match_idens(core, prefix) if iden is None: return await core.enableCronJob(iden) self.printf(f'Enabled cron job {iden}') async def _handle_disable(self, core, opts): prefix = opts.prefix iden = await self._match_idens(core, prefix) if iden is None: return await core.disableCronJob(iden) self.printf(f'Disabled cron job {iden}') async def _handle_del(self, core, opts): prefix = opts.prefix iden = await self._match_idens(core, prefix) if iden is None: return await core.delCronJob(iden) self.printf(f'Deleted cron job {iden}') async def _handle_stat(self, core, opts): ''' Prints details about a particular cron job. 
Not actually a different API call ''' prefix = opts.prefix crons = await core.listCronJobs() idens = [cron['iden'] for cron in crons] matches = [iden for iden in idens if iden.startswith(prefix)] if len(matches) == 0: self.printf('Error: provided iden does not match any valid authorized cron job') return elif len(matches) > 1: self.printf('Error: provided iden matches more than one cron job') return iden = matches[0] cron = [cron for cron in crons if cron.get('iden') == iden][0] user = cron.get('username') or '<None>' query = cron.get('query') or '<missing>' isrecur = 'Yes' if cron.get('recur') else 'No' enabled = 'Yes' if cron.get('enabled') else 'No' startcount = cron.get('startcount') or 0 recs = cron.get('recs', []) laststart = cron.get('laststarttime') lastend = cron.get('lastfinishtime') laststart = 'Never' if laststart is None else self._format_timestamp(laststart) lastend = 'Never' if lastend is None else self._format_timestamp(lastend) lastresult = cron.get('lastresult') or '<None>' self.printf(f'iden: {iden}') self.printf(f'user: {user}') self.printf(f'enabled: {enabled}') self.printf(f'recurring: {isrecur}') self.printf(f'# starts: {startcount}') self.printf(f'last start time: {laststart}') self.printf(f'last end time: {lastend}') self.printf(f'last result: {lastresult}') self.printf(f'query: {query}') if not recs: self.printf(f'entries: <None>') else: self.printf(f'entries: {"incunit":10} {"incval":6} {"required"}') for reqdict, incunit, incval in recs: reqdict = reqdict or '<None>' incunit = '<None>' if incunit is None else incunit incval = '<None>' if incval is None else incval self.printf(f' {incunit:10} {incval:6} {reqdict}') async def runCmdOpts(self, opts): s_common.deprecated('cmdr> cron') line = opts.get('line') if line is None: self.printf(self.__doc__) return core = self.getCmdItem() argv = s_parser.Parser(line).cmdrargs() try: opts = self._make_argparser().parse_args(argv) except s_exc.ParserExit: return handlers = { 'add': self._handle_add, 'del': self._handle_del, 'rm': self._handle_del, 'disable': self._handle_disable, 'enable': self._handle_enable, 'list': self._handle_list, 'ls': self._handle_list, 'mod': self._handle_mod, 'edit': self._handle_mod, 'stat': self._handle_stat, } await handlers[opts.cmd](core, opts) class At(s_cli.Cmd): ''' Adds a non-recurring cron job. It will execute a Storm query at one or more specified times. List/details/deleting cron jobs created with 'at' use the same commands as other cron jobs: cron list/stat/del respectively. Syntax: at (time|+time delta)+ {query} Notes: This command accepts one or more time specifications followed by exactly one storm query in curly braces. Each time specification may be in synapse time delta format (e.g + 1 day) or synapse time format (e.g. 20501217030432101). Seconds will be ignored, as cron jobs' granularity is limited to minutes. All times are interpreted as UTC. The other option for time specification is a relative time from now. This consists of a plus sign, a positive integer, then one of 'minutes, hours, days'. Note that the record for a cron job is stored until explicitly deleted via "cron del". 
Examples: # Run a storm query in 5 minutes at +5 minutes {[inet:ipv4=1]} # Run a storm query tomorrow and in a week at +1 day +7 days {[inet:ipv4=1]} # Run a query at the end of the year Zulu at 20181231Z2359 {[inet:ipv4=1]} ''' _cmd_name = 'at' _cmd_syntax = ( ('line', {'type': 'glob'}), # type: ignore ) def _make_argparser(self): parser = s_cmd.Parser(prog='at', outp=self, description=self.__doc__) parser.add_argument('args', nargs='+', help='date | delta| {query})') return parser async def runCmdOpts(self, opts): s_common.deprecated('cmdr> at') line = opts.get('line') if line is None: self.printf(self.__doc__) return core = self.getCmdItem() argv = s_parser.Parser(line).cmdrargs() # Currently, using an argparser is overkill for this command. Using for future extensibility (and help). try: opts = self._make_argparser().parse_args(argv) except s_exc.ParserExit: return query = None consumed_next = False tslist = [] # TODO: retrieve time from cortex in case of wrong cmdr time now = time.time() for pos, arg in enumerate(opts.args): try: if consumed_next: consumed_next = False continue if arg.startswith('{'): if query is not None: self.printf('Error: only a single query is allowed') return query = arg[1:-1] continue if arg.startswith('+'): if arg[-1].isdigit(): if pos == len(opts.args) - 1: self.printf('Time delta missing unit') return arg = f'{arg} {opts.args[pos + 1]}' consumed_next = True ts = now + s_time.delta(arg) / 1000.0 tslist.append(ts) continue ts = s_time.parse(arg) / 1000.0 tslist.append(ts) except (ValueError, s_exc.BadTypeValu): self.printf(f'Error: Trouble parsing "{arg}"') return if query is None: self.printf('Error: Missing query argument') return def _ts_to_reqdict(ts): dt = datetime.datetime.fromtimestamp(ts, datetime.timezone.utc) return { 'minute': dt.minute, 'hour': dt.hour, 'dayofmonth': dt.day, 'month': dt.month, 'year': dt.year } if not tslist: self.printf('Error: at least one requirement must be provided') return reqdicts = [_ts_to_reqdict(ts) for ts in tslist] cdef = {'storm': query, 'reqs': reqdicts, 'incunit': None, 'incvals': None, } newcdef = await core.addCronJob(cdef) self.printf(f'Created cron job {newcdef["iden"]}')
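

# ---------------------------------------------------------------------------
# Minimal sketch (not part of the module): the --day parsing helpers are
# staticmethods, so their classification rules can be checked directly.
# The expected tuples follow from the help text above: day-of-week
# abbreviations index from Mon=0 (assuming an English locale), a leading
# '+' selects a recurrence period, and a bare integer is a day of month.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    assert Cron._parse_day('Wed,Sun') == ('dayofweek', [2, 6])
    assert Cron._parse_day('-1') == ('dayofmonth', -1)
    assert Cron._parse_day('+2') == ('day', 2)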
import json from pyrogram import Client, filters from firebase import firebase from process import check, searches, truecaller_search, fb_search, logreturn, log, eyecon_search from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton from creds import cred firebase = firebase.FirebaseApplication(cred.DB_URL) app = Client( "KNOW-WHO-BOT", api_id=cred.API_ID, api_hash=cred.API_HASH, bot_token=cred.BOT_TOKEN ) @app.on_message(filters.command(["start"])) def start(client, message): client.send_message(chat_id=message.chat.id, text=f"`Hi` **{message.from_user.first_name}**\n `Enter the 10 digit Indian Mobile number to search...`", reply_markup=InlineKeyboardMarkup( [[InlineKeyboardButton("About", callback_data="about"), InlineKeyboardButton("Source", callback_data="src")]])) check_status = check(message.chat.id) @app.on_callback_query() def newbt(client, callback_query): txt = callback_query.data if txt == "about": callback_query.message.edit(text=f"`Bot` : [Identify Unknown Number](t.me/phonecalltrackerbot)\n`Creator :` [Rohithaditya](t.me/rohithaditya)\n`Language:` [Python3](https://python.org)\n`Library :` [Pyrogram](https://docs.pyrogram.org/) \n`Server :` [Heroku](https://herokuapp.com/)", disable_web_page_preview=True, reply_markup=InlineKeyboardMarkup( [[InlineKeyboardButton("Give Feedback", url="t.me/rohithaditya")]])) elif txt == "src": callback_query.message.edit(text="Enjoy...:-D\nhttps://github.com/rohithaditya/truecaller", disable_web_page_preview=True) @app.on_message(filters.command(["about"])) def about(client, message): client.send_message(chat_id=message.chat.id, reply_to_message_id=message.message_id, text=f"`Bot` : [True Caller Bot](t.me/phonecalltrackerbot)\n`Creator :` [agentnova](t.me/rohith)\n`Language:` [Python3](https://python.org)\n`Library :` [Pyrogram](https://docs.pyrogram.org/) \n`Server :` [Heroku](https://herokuapp.com/)", disable_web_page_preview=True, reply_markup=InlineKeyboardMarkup( [[InlineKeyboardButton("Feedback", url="t.me/agentnova")]])) @app.on_message(filters.command(["log"])) def stats(client, message): stat = client.send_message(chat_id=message.chat.id, reply_to_message_id=message.message_id, text="`Fetching details`") txt = logreturn() stat.edit(txt) @app.on_message(filters.text) def echo(client, message): actvt = firebase.get('/stats', 'total_searches') data = {"total_searches": 1} if not actvt: firebase.put('/stats', 'total_searches', data) pq = "" pro = client.send_message(chat_id=message.chat.id, text="Searching...", reply_to_message_id=message.message_id) r_num = message.text num = r_num.replace("+91", "").replace(" ", "") frbseyename = "" frbsefb = "" frbsetrname = "" frbsetrmail = "" if num.isnumeric() and len(num) == 10: pq = "\n\n**----••Truecaller says----**\n\nLimit exceeded, try again tomorrow 🤦🏻‍♂️" tresponse = "" try: tresponse = truecaller_search(cred.T_AUTH, num) if tresponse: restj = tresponse.json() trslt = json.dumps(restj) tjsonload = json.loads(trslt) if "name" in tjsonload['data'][0]: if tjsonload['data'][0]['internetAddresses']: pq = f"\n\n**----••Truecaller says----**\n\nName : `{tjsonload['data'][0]['name']}`\nCarrier : `{tjsonload['data'][0]['phones'][0]['carrier']}` \nE-mail : {tjsonload['data'][0]['internetAddresses'][0]['id']}" frbsetrname = tjsonload['data'][0]['name'] frbsetrmail = tjsonload['data'][0]['internetAddresses'][0]['id'] elif not tjsonload['data'][0]['internetAddresses']: pq = f"\n\n**----••Truecaller says----**\n\nName : `{tjsonload['data'][0]['name']}`\nCarrier : `{tjsonload['data'][0]['phones'][0]['carrier']}`" frbsetrname = tjsonload['data'][0]['name'] else: pq = "\n\n**----••Truecaller says----**\n\nNo results found 🤦🏻‍♂️" if tresponse.status_code == 429: pq = "\n\n**----••Truecaller says----**\n\nLimit exceeded, try again tomorrow 🤦🏻‍♂️" except Exception: pass response = eyecon_search(num) fbres = fb_search(num) fbrslt = fbres.url.replace('https://graph.', '').replace('picture?width=600', '') if response: rslt = response.json() if rslt: temp = json.dumps(rslt).replace('[', '').replace(']', '') jsonload = json.loads(temp) yk = f"\n\n**----••Eyecon says----**\n\nName : `{jsonload['name']}`" frbseyename = jsonload["name"] if "facebook.com" in fbrslt: yk = f"\n\n**----••Eyecon says----**\n\nName : `{jsonload['name']}`\nFacebook : {fbrslt}" frbseyename = jsonload["name"] frbsefb = fbrslt else: yk = "**----••Eyecon says----**\n\nNo results found 🤦🏻‍♂️" else: yk = "**----••Eyecon says----**\n\nNo results found 🤦🏻‍♂️" yk += pq pro.edit(text=yk, disable_web_page_preview=True, reply_markup=InlineKeyboardMarkup( [[InlineKeyboardButton("Source", callback_data="src")]])) searches() log() data = {"Mob": num} if frbseyename: data["Eyecon Name"] = frbseyename if frbsetrname: data["Truecaller name"] = frbsetrname if frbsefb: data["Facebook"] = frbsefb if frbsetrmail: data["Mail"] = frbsetrmail if len(data) > 1: firebase.put('/knowho-log', num, data) else: pro.edit("`Only` **10** `digit numbers allowed` 🤦🏻‍♂️") app.run()
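# --- Illustration (hypothetical helper, not used by the bot above) -----------
# The echo handler strips "+91" and spaces and then requires exactly ten
# digits. Pulling that check into a small function makes it testable; note
# that str.isnumeric must be *called* -- a bare `num.isnumeric` is a bound
# method object and is always truthy.
from typing import Optional

def normalize_indian_number(raw: str) -> Optional[str]:
    num = raw.replace("+91", "").replace(" ", "")
    if num.isnumeric() and len(num) == 10:
        return num
    return None

assert normalize_indian_number("+91 98765 43210") == "9876543210"
assert normalize_indian_number("hello") is None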
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from dataclasses import dataclass from typing import Optional, Tuple from pants.backend.python.lint.bandit.subsystem import Bandit from pants.backend.python.rules import pex from pants.backend.python.rules.pex import ( Pex, PexInterpreterConstraints, PexProcess, PexRequest, PexRequirements, ) from pants.backend.python.target_types import PythonInterpreterCompatibility, PythonSources from pants.core.goals.lint import LintReport, LintRequest, LintResult, LintResults, LintSubsystem from pants.core.util_rules import source_files, stripped_source_files from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest from pants.engine.fs import Digest, DigestSubset, GlobMatchErrorBehavior, MergeDigests, PathGlobs from pants.engine.process import FallibleProcessResult from pants.engine.rules import Get, MultiGet, collect_rules, rule from pants.engine.target import FieldSet from pants.engine.unions import UnionRule from pants.python.python_setup import PythonSetup from pants.util.logging import LogLevel from pants.util.strutil import pluralize @dataclass(frozen=True) class BanditFieldSet(FieldSet): required_fields = (PythonSources,) sources: PythonSources compatibility: PythonInterpreterCompatibility class BanditRequest(LintRequest): field_set_type = BanditFieldSet @dataclass(frozen=True) class BanditPartition: field_sets: Tuple[BanditFieldSet, ...] interpreter_constraints: PexInterpreterConstraints def generate_args( *, source_files: SourceFiles, bandit: Bandit, report_file_name: Optional[str] ) -> Tuple[str, ...]: args = [] if bandit.config is not None: args.append(f"--config={bandit.config}") if report_file_name: args.append(f"--output={report_file_name}") args.extend(bandit.args) args.extend(source_files.files) return tuple(args) @rule(level=LogLevel.DEBUG) async def bandit_lint_partition( partition: BanditPartition, bandit: Bandit, lint_subsystem: LintSubsystem ) -> LintResult: requirements_pex_request = Get( Pex, PexRequest( output_filename="bandit.pex", internal_only=True, requirements=PexRequirements(bandit.all_requirements), interpreter_constraints=( partition.interpreter_constraints or PexInterpreterConstraints(bandit.interpreter_constraints) ), entry_point=bandit.entry_point, ), ) config_digest_request = Get( Digest, PathGlobs( globs=[bandit.config] if bandit.config else [], glob_match_error_behavior=GlobMatchErrorBehavior.error, description_of_origin="the option `--bandit-config`", ), ) source_files_request = Get( SourceFiles, SourceFilesRequest(field_set.sources for field_set in partition.field_sets) ) requirements_pex, config_digest, source_files = await MultiGet( requirements_pex_request, config_digest_request, source_files_request ) input_digest = await Get( Digest, MergeDigests((source_files.snapshot.digest, requirements_pex.digest, config_digest)) ) report_file_name = "bandit_report.txt" if lint_subsystem.reports_dir else None result = await Get( FallibleProcessResult, PexProcess( requirements_pex, argv=generate_args( source_files=source_files, bandit=bandit, report_file_name=report_file_name ), input_digest=input_digest, description=f"Run Bandit on {pluralize(len(partition.field_sets), 'file')}.", output_files=(report_file_name,) if report_file_name else None, level=LogLevel.DEBUG, ), ) report = None if report_file_name: report_digest = await Get( Digest, DigestSubset( result.output_digest, PathGlobs( [report_file_name], 
glob_match_error_behavior=GlobMatchErrorBehavior.warn, description_of_origin="Bandit report file", ), ), ) report = LintReport(report_file_name, report_digest) return LintResult.from_fallible_process_result( result, partition_description=str(sorted(partition.interpreter_constraints)), report=report ) @rule(desc="Lint with Bandit", level=LogLevel.DEBUG) async def bandit_lint( request: BanditRequest, bandit: Bandit, python_setup: PythonSetup ) -> LintResults: if bandit.skip: return LintResults([], linter_name="Bandit") # NB: Bandit output depends upon which Python interpreter version it's run with # ( https://github.com/PyCQA/bandit#under-which-version-of-python-should-i-install-bandit). We # batch targets by their constraints to ensure, for example, that all Python 2 targets run # together and all Python 3 targets run together. constraints_to_field_sets = PexInterpreterConstraints.group_field_sets_by_constraints( request.field_sets, python_setup ) partitioned_results = await MultiGet( Get(LintResult, BanditPartition(partition_field_sets, partition_compatibility)) for partition_compatibility, partition_field_sets in constraints_to_field_sets.items() ) return LintResults(partitioned_results, linter_name="Bandit") def rules(): return [ *collect_rules(), UnionRule(LintRequest, BanditRequest), *pex.rules(), *source_files.rules(), *stripped_source_files.rules(), ]
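# --- Illustration (stand-ins, not part of the pants rules above) -------------
# generate_args composes the Bandit command line in a fixed order: config,
# report output, pass-through args, then the source files. SimpleNamespace
# objects stand in for the pants-internal Bandit subsystem and SourceFiles;
# this assumes the generate_args defined above is in scope.
from types import SimpleNamespace

fake_bandit = SimpleNamespace(config=".bandit.yml", args=("-r",))
fake_sources = SimpleNamespace(files=("src/app.py", "src/util.py"))
print(
    generate_args(
        source_files=fake_sources, bandit=fake_bandit, report_file_name="bandit_report.txt"
    )
)
# -> ('--config=.bandit.yml', '--output=bandit_report.txt', '-r', 'src/app.py', 'src/util.py')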
import pytest import torch from transformers import AutoTokenizer from typing import List, Dict from pkg_resources import resource_filename from nerblackbox.modules.ner_training.data_preprocessing.tools.csv_reader import ( CsvReader, ) from nerblackbox.modules.ner_training.data_preprocessing.tools.input_example import ( InputExample, ) from nerblackbox.modules.ner_training.data_preprocessing.tools.input_examples_to_tensors import ( InputExamplesToTensors, ) from nerblackbox.modules.ner_training.data_preprocessing.tools.bert_dataset import ( BertDataset, ) from nerblackbox.modules.ner_training.data_preprocessing.data_preprocessor import ( DataPreprocessor, order_tag_list, convert_tag_list_bio2plain, ) from nerblackbox.modules.ner_training.data_preprocessing.tools.utils import ( EncodingsKeys, ) from nerblackbox.tests.utils import PseudoDefaultLogger from nerblackbox.modules.ner_training.ner_model import NEWLINE_TOKENS tokenizer = AutoTokenizer.from_pretrained( "af-ai-center/bert-base-swedish-uncased", do_lower_case=False, additional_special_tokens=NEWLINE_TOKENS, use_fast=True, ) csv_reader = CsvReader( path=resource_filename("nerblackbox", "tests/test_data"), tokenizer=tokenizer, do_lower_case=False, csv_file_separator="\t", default_logger=None, ) data_preprocessor = DataPreprocessor( tokenizer=tokenizer, do_lower_case=False, default_logger=PseudoDefaultLogger(), max_seq_length=4, ) ######################################################################################################################## ######################################################################################################################## ######################################################################################################################## class TestCsvReaderAndDataProcessor: #################################################################################################################### @pytest.mark.parametrize( "tag_list, " "input_examples", [ ( ["O", "PER", "ORG", "MISC"], { "train": [ InputExample( guid="", text="På skidspår.se kan längdskidåkare själva betygsätta förhållandena i spåren .", tags="O MISC O O O O O O O O", ), ], "val": [ InputExample( guid="", text="Fastigheten är ett landmärke designad av arkitekten Robert Stern .", tags="O O O O O O O PER PER O", ), ], "test": [ InputExample( guid="", text="Apple noteras för 85,62 poäng , vilket är den högsta siffran någonsin i undersökningen .", tags="ORG O O O O O O O O O O O O O O", ), ], "predict": [ InputExample( guid="", text="På skidspår.se kan längdskidåkare själva betygsätta förhållandena i spåren .", tags="O MISC O O O O O O O O", ), InputExample( guid="", text="Fastigheten är ett landmärke designad av arkitekten Robert Stern .", tags="O O O O O O O PER PER O", ), InputExample( guid="", text="Apple noteras för 85,62 poäng , vilket är den högsta siffran någonsin i undersökningen .", tags="ORG O O O O O O O O O O O O O O", ), ], }, ), ], ) def tests( self, tag_list: List[str], input_examples: Dict[str, List[InputExample]], ) -> None: ################################## # 1. CsvReader ################################## for phase in ["train", "val", "test"]: test_input_examples = csv_reader.get_input_examples(phase) assert ( len(test_input_examples) in (1, 2) ), f"ERROR! len(test_input_examples) = {len(test_input_examples)} should be 1 or 2."
assert ( test_input_examples[0].text == input_examples[phase][0].text ), f"phase = {phase}: test_input_examples_text = {test_input_examples[0].text} != {input_examples[phase][0].text}" assert ( test_input_examples[0].tags == input_examples[phase][0].tags ), f"phase = {phase}: test_input_examples_tags = {test_input_examples[0].tags} != {input_examples[phase][0].tags}" ################################## # 2. DataProcessor ################################## # a. get_input_examples_train test_input_examples, test_tag_list = data_preprocessor.get_input_examples_train( prune_ratio={"train": 0.5, "val": 1.0, "test": 1.0}, dataset_name=None, ) assert set(test_tag_list) == set( tag_list ), f"test_tag_list = {test_tag_list} != {tag_list}" for phase in ["train", "val", "test"]: assert ( len(test_input_examples[phase]) == 1 ), f"ERROR! len(test_input_examples[{phase}]) = {len(test_input_examples[phase])} should be 1." assert ( test_input_examples[phase][0].text == input_examples[phase][0].text ), f"phase = {phase}: test_input_examples.text = {test_input_examples[phase][0].text} != {input_examples[phase][0].text}" assert ( test_input_examples[phase][0].tags == input_examples[phase][0].tags ), f"phase = {phase}: test_input_examples.tags = {test_input_examples[phase][0].tags} != {input_examples[phase][0].tags}" # b. get_input_examples_predict test_sentences = [ elem.text for v in test_input_examples.values() for elem in v ] # retrieve example sentences test_input_examples_predict = data_preprocessor.get_input_examples_predict( test_sentences )["predict"] assert len(test_input_examples_predict) == len( input_examples["predict"] ), f"len(test_input_examples_predict) = {len(test_input_examples_predict)} != {len(input_examples['predict'])}" for (test_input_example_predict, true_input_example_predict) in zip( test_input_examples_predict, input_examples["predict"] ): assert ( test_input_example_predict.text == true_input_example_predict.text ), f"test_input_example_predict.text = {test_input_example_predict.text} != {true_input_example_predict.text}" true_input_example_predict_tags = " ".join( "O" for _ in range(len(true_input_example_predict.text.split())) ) assert ( test_input_example_predict.tags == true_input_example_predict_tags ), f"test_input_example_predict.tags = {test_input_example_predict.tags} != {true_input_example_predict_tags}" # c. to_dataloader dataloader = data_preprocessor.to_dataloader( input_examples, tag_list, batch_size=1 ) for key in ["train", "val", "test", "predict"]: assert ( key in dataloader.keys() ), f"key = {key} not in dataloader.keys() = {dataloader.keys()}" # TODO: further testing ######################################################################################################################## ######################################################################################################################## ######################################################################################################################## class TestInputExamplesToTensorsAndBertDataset: @pytest.mark.parametrize( "texts, " "labels, " "tag_tuple, " "max_seq_length, " "true_input_ids, " "true_attention_mask, " "true_token_type_ids, " "true_tag_ids, " "true_input_tokens", [ # 1.
single example: no truncation ( ["arbetsförmedlingen ai-center finns i stockholm"], ["ORG ORG O O LOC"], ("O", "ORG", "LOC"), 12, torch.tensor( [[101, 7093, 2842, 8126, 1011, 5410, 1121, 1045, 1305, 102, 0, 0]] ), torch.tensor([[1] * 10 + [0] * 2]), torch.tensor([[0] * 12]), torch.tensor( [[-100, 1, -100, 1, -100, -100, 0, 0, 2, -100, -100, -100]] ), [ [ "[CLS]", "arbetsförmedl", "##ingen", "ai", "-", "center", "finns", "i", "stockholm", "[SEP]", ] + ["[PAD]"] * 2 ], ), # 2. single example: no truncation, [NEWLINE] ( ["arbetsförmedlingen ai-center [NEWLINE] finns i stockholm"], ["ORG ORG O O O LOC"], ("O", "ORG", "LOC"), 12, torch.tensor( [ [ 101, 7093, 2842, 8126, 1011, 5410, 30523, 1121, 1045, 1305, 102, 0, ] ] ), torch.tensor([[1] * 11 + [0] * 1]), torch.tensor([[0] * 12]), torch.tensor([[-100, 1, -100, 1, -100, -100, 0, 0, 0, 2, -100, -100]]), [ [ "[CLS]", "arbetsförmedl", "##ingen", "ai", "-", "center", "[NEWLINE]", "finns", "i", "stockholm", "[SEP]", ] + ["[PAD]"] * 1 ], ), # 3. single example: truncation ( ["arbetsförmedlingen ai-center finns i stockholm"], ["ORG ORG O O LOC"], ("O", "ORG", "LOC"), 4, torch.tensor( [ [101, 7093, 2842, 102], [101, 8126, 1011, 102], [101, 5410, 1121, 102], [101, 1045, 1305, 102], ] ), torch.tensor( [ [1] * 4, [1] * 4, [1] * 4, [1] * 4, ] ), torch.tensor( [ [0] * 4, [0] * 4, [0] * 4, [0] * 4, ] ), torch.tensor( [ [-100, 1, -100, -100], [-100, 1, -100, -100], [-100, -100, 0, -100], [-100, 0, 2, -100], ] ), [ ["[CLS]", "arbetsförmedl", "##ingen", "[SEP]"], ["[CLS]", "ai", "-", "[SEP]"], ["[CLS]", "center", "finns", "[SEP]"], ["[CLS]", "i", "stockholm", "[SEP]"], ], ), # 4. two examples: truncation ( ["arbetsförmedlingen ai-center", "finns i stockholm"], ["ORG ORG", "O O LOC"], ("O", "ORG", "LOC"), 4, torch.tensor( [ [101, 7093, 2842, 102], [101, 8126, 1011, 102], [101, 5410, 102, 0], [101, 1121, 1045, 102], [101, 1305, 102, 0], ] ), torch.tensor( [ [1] * 4, [1] * 4, [1] * 3 + [0], [1] * 4, [1] * 3 + [0], ] ), torch.tensor( [ [0] * 4, [0] * 4, [0] * 4, [0] * 4, [0] * 4, ] ), torch.tensor( [ [-100, 1, -100, -100], [-100, 1, -100, -100], [-100, -100, -100, -100], [-100, 0, 0, -100], [-100, 2, -100, -100], ] ), [ ["[CLS]", "arbetsförmedl", "##ingen", "[SEP]"], ["[CLS]", "ai", "-", "[SEP]"], ["[CLS]", "center", "[SEP]", "[PAD]"], ["[CLS]", "finns", "i", "[SEP]"], ["[CLS]", "stockholm", "[SEP]", "[PAD]"], ], ), ], ) def tests( self, texts: List[str], labels: List[str], tag_tuple: List[str], max_seq_length: int, true_input_ids: torch.Tensor, true_attention_mask: torch.Tensor, true_token_type_ids: torch.Tensor, true_tag_ids: torch.Tensor, true_input_tokens: torch.Tensor, ) -> None: ################################## # 1. 
InputExamplesToTensors ################################## input_examples = [ InputExample( guid="", text=text, tags=label, ) for text, label in zip(texts, labels) ] input_examples_to_tensors = InputExamplesToTensors( tokenizer=tokenizer, max_seq_length=max_seq_length, tag_tuple=tuple(tag_tuple), default_logger=PseudoDefaultLogger(), ) encodings = input_examples_to_tensors(input_examples, predict=False) input_tokens = [ tokenizer.convert_ids_to_tokens(input_ids_single) for input_ids_single in encodings["input_ids"] ] for (string, true) in zip( EncodingsKeys, [true_input_ids, true_attention_mask, true_token_type_ids, true_tag_ids], ): assert torch.all( torch.eq(encodings[string], true) ), f"{string} = {encodings[string]} != {true}" for (string, true, _test) in zip( ["input_tokens"], [true_input_tokens], [input_tokens], ): assert _test == true, f"{string} = {_test} != {true}" ################################## # 2. BertDataset ################################## data = BertDataset( encodings=encodings ) # data[j] = 4 torch tensors corresponding to EncodingKeys assert len(data) >= len( texts ), f"len(data) = {len(data)} < {len(texts)} = len(texts)" for i, (string, true) in enumerate( zip( ["input_ids", "attention_mask", "token_type_ids", "tag_ids"], [ true_input_ids, true_attention_mask, true_token_type_ids, true_tag_ids, ], ) ): for j in range(len(true)): assert torch.all( torch.eq(data[j][i], true[j]) ), f"{string} = {data[j][i]} != {true[j]}" ######################################################################################################################## ######################################################################################################################## ######################################################################################################################## class TestMisc: #################################################################################################################### @pytest.mark.parametrize( "tag_list, " "returned_tag_list", [ ( ["O", "PER", "ORG", "MISC"], ["O", "PER", "ORG", "MISC"], ), ( ["O", "B-PER", "B-ORG", "B-MISC"], ["O", "B-PER", "B-ORG", "B-MISC", "I-PER", "I-ORG", "I-MISC"], ), ], ) def test_ensure_completeness_in_case_of_bio_tags( self, tag_list: List[str], returned_tag_list: List[str], ) -> None: test_returned_tag_list = ( data_preprocessor._ensure_completeness_in_case_of_bio_tags( tag_list=tag_list ) ) assert ( test_returned_tag_list == returned_tag_list ), f"test_returned_tag_list = {test_returned_tag_list} != {returned_tag_list}" #################################################################################################################### @pytest.mark.parametrize( "tag_list, " "tag_list_ordered", [ ( ["O", "PER", "ORG", "MISC"], ["O", "MISC", "ORG", "PER"], ), ( ["PER", "ORG", "O", "MISC"], ["O", "MISC", "ORG", "PER"], ), ( ["O", "B-PER", "I-MISC", "B-ORG", "I-PER", "B-MISC", "I-ORG"], ["O", "B-MISC", "B-ORG", "B-PER", "I-MISC", "I-ORG", "I-PER"], ), ], ) def test_order_tag_list( self, tag_list: List[str], tag_list_ordered: List[str], ) -> None: test_tag_list_ordered = order_tag_list(tag_list) assert ( test_tag_list_ordered == tag_list_ordered ), f"test_tag_list_ordered = {test_tag_list_ordered} != {tag_list_ordered}" #################################################################################################################### @pytest.mark.parametrize( "tag_list_bio, " "tag_list", [ ( ["O", "B-MISC", "B-ORG", "B-PER", "I-MISC", "I-ORG", "I-PER"], ["O", "MISC", "ORG", "PER"], ), ( # if applied to 
plain tag_list, nothing happens ["O", "MISC", "ORG", "PER"], ["O", "MISC", "ORG", "PER"], ), ], ) def test_convert_tag_list_bio2plain( self, tag_list_bio: List[str], tag_list: List[str], ) -> None: test_tag_list = convert_tag_list_bio2plain(tag_list_bio) assert ( test_tag_list == tag_list ), f"test_tag_list = {test_tag_list} != {tag_list}" if __name__ == "__main__": pytest.main([__file__]) # the test methods are parametrized and cannot be called without arguments, so run the module through pytest
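# --- Illustration (hypothetical helper, not the library's implementation) ----
# The expected tag_ids in the parametrized cases above follow a common
# subword-labeling convention: only the first subtoken of each word carries
# the tag index, while [CLS]/[SEP]/[PAD] and continuation subtokens get -100
# so the loss function ignores them.
def align_tags(word_tags, word_ids, tag2idx):
    # word_ids has one entry per subtoken; None marks special/padding tokens
    aligned, seen = [], set()
    for wid in word_ids:
        if wid is None or wid in seen:
            aligned.append(-100)
        else:
            seen.add(wid)
            aligned.append(tag2idx[word_tags[wid]])
    return aligned

# a two-subtoken word keeps its tag only on the first subtoken
print(align_tags(["ORG", "O"], [None, 0, 0, 1, None], {"O": 0, "ORG": 1}))
# -> [-100, 1, -100, 0, -100]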
#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Authors: # - Paul Nilsson, paul.nilsson@cern.ch, 2019-2022 from __future__ import print_function # Python 2, 2to3 complains about this import functools import signal import threading import traceback import queue from os import getpid from time import time from sys import stderr from collections import namedtuple from shutil import rmtree from pilot.common.exception import ExcThread from pilot.control import job, data, monitor from pilot.util.constants import SUCCESS, PILOT_KILL_SIGNAL, MAX_KILL_WAIT_TIME from pilot.util.processes import kill_processes, threads_aborted from pilot.util.timing import add_to_pilot_timing import logging logger = logging.getLogger(__name__) def interrupt(args, signum, frame): """ Interrupt function on the receiving end of kill signals. This function is forwarded any incoming signals (SIGINT, SIGTERM, etc) and will set abort_job which instructs the threads to abort the job. :param args: pilot arguments. :param signum: signal. :param frame: stack/execution frame pointing to the frame that was interrupted by the signal. :return: """ sig = [v for v, k in list(signal.__dict__.items()) if k == signum][0] args.signal_counter += 1 # keep track of when first kill signal arrived, any stuck loops should abort at a defined cut off time if args.kill_time == 0: args.kill_time = int(time()) max_kill_wait_time = MAX_KILL_WAIT_TIME + 60 # add another minute of grace to let threads finish current_time = int(time()) if args.kill_time and current_time - args.kill_time > max_kill_wait_time: logger.warning('passed maximum waiting time after first kill signal - will commit suicide - farewell') try: rmtree(args.sourcedir) except Exception as e: logger.warning(e) logging.shutdown() kill_processes(getpid()) add_to_pilot_timing('0', PILOT_KILL_SIGNAL, time(), args) add_to_pilot_timing('1', PILOT_KILL_SIGNAL, time(), args) logger.warning('caught signal: %s in FRAME=\n%s', sig, '\n'.join(traceback.format_stack(frame))) args.signal = sig logger.warning('will instruct threads to abort and update the server') args.abort_job.set() logger.warning('setting graceful stop (in case it was not set already)') args.graceful_stop.set() logger.warning('waiting for threads to finish') args.job_aborted.wait() def run(args): """ Main execution function for the stage-in workflow. The function sets up the internal queues which handle the flow of jobs. :param args: pilot arguments. :returns: traces. 
""" logger.info('setting up signal handling') signal.signal(signal.SIGINT, functools.partial(interrupt, args)) signal.signal(signal.SIGTERM, functools.partial(interrupt, args)) signal.signal(signal.SIGQUIT, functools.partial(interrupt, args)) signal.signal(signal.SIGSEGV, functools.partial(interrupt, args)) signal.signal(signal.SIGXCPU, functools.partial(interrupt, args)) signal.signal(signal.SIGUSR1, functools.partial(interrupt, args)) signal.signal(signal.SIGBUS, functools.partial(interrupt, args)) logger.info('setting up queues') queues = namedtuple('queues', ['jobs', 'data_in', 'data_out', 'current_data_in', 'validated_jobs', 'finished_jobs', 'finished_data_in', 'finished_data_out', 'completed_jobids', 'failed_jobs', 'failed_data_in', 'failed_data_out', 'completed_jobs']) queues.jobs = queue.Queue() queues.data_in = queue.Queue() queues.data_out = queue.Queue() queues.current_data_in = queue.Queue() queues.validated_jobs = queue.Queue() queues.finished_jobs = queue.Queue() queues.finished_data_in = queue.Queue() queues.finished_data_out = queue.Queue() queues.failed_jobs = queue.Queue() queues.failed_data_in = queue.Queue() queues.failed_data_out = queue.Queue() queues.completed_jobs = queue.Queue() queues.completed_jobids = queue.Queue() logger.info('setting up tracing') traces = namedtuple('traces', ['pilot']) traces.pilot = {'state': SUCCESS, 'nr_jobs': 0, 'error_code': 0, 'command': None} # define the threads targets = {'job': job.control, 'data': data.control, 'monitor': monitor.control} threads = [ExcThread(bucket=queue.Queue(), target=target, kwargs={'queues': queues, 'traces': traces, 'args': args}, name=name) for name, target in list(targets.items())] logger.info('starting threads') [thread.start() for thread in threads] logger.info('waiting for interrupts') thread_count = threading.activeCount() while threading.activeCount() > 1: for thread in threads: bucket = thread.get_bucket() try: exc = bucket.get(block=False) except queue.Empty: pass else: exc_type, exc_obj, exc_trace = exc # deal with the exception print('received exception from bucket queue in generic workflow: %s' % exc_obj, file=stderr) thread.join(0.1) abort = False if thread_count != threading.activeCount(): # has all threads finished? abort = threads_aborted(abort_at=1) if abort: break logger.info(f"end of stager workflow (traces error code: {traces.pilot['error_code']})") return traces
from abc import ABC, abstractmethod from inspect import isfunction from types import FunctionType from typing import ( TYPE_CHECKING, Any, Callable, Dict, Generic, List, Mapping, Optional, Type, TypeVar, Union, cast, ) import dagster._check as check from dagster.core.asset_defs.source_asset import SourceAsset from dagster.core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError from dagster.utils import merge_dicts from .events import AssetKey from .executor_definition import ExecutorDefinition from .graph_definition import GraphDefinition, SubselectedGraphDefinition from .job_definition import JobDefinition from .partition import PartitionScheduleDefinition, PartitionSetDefinition from .pipeline_definition import PipelineDefinition from .schedule_definition import ScheduleDefinition from .sensor_definition import SensorDefinition from .unresolved_asset_job_definition import UnresolvedAssetJobDefinition from .utils import check_valid_name if TYPE_CHECKING: from dagster.core.asset_defs.asset_group import AssetGroup from dagster.core.asset_defs.assets import AssetsDefinition VALID_REPOSITORY_DATA_DICT_KEYS = { "pipelines", "partition_sets", "schedules", "sensors", "jobs", } RepositoryLevelDefinition = TypeVar( "RepositoryLevelDefinition", PipelineDefinition, JobDefinition, PartitionSetDefinition, ScheduleDefinition, SensorDefinition, ) class _CacheingDefinitionIndex(Generic[RepositoryLevelDefinition]): def __init__( self, definition_class: Type[RepositoryLevelDefinition], definition_class_name: str, definition_kind: str, definitions: Mapping[ str, Union[RepositoryLevelDefinition, Callable[[], RepositoryLevelDefinition]] ], validation_fn: Callable[[RepositoryLevelDefinition], RepositoryLevelDefinition], lazy_definitions_fn: Optional[Callable[[], List[RepositoryLevelDefinition]]] = None, ): """ Args: definitions: A dictionary of definition names to definitions or functions that load definitions. lazy_definitions_fn: A function for loading a list of definitions whose names are not even known until loaded. 
""" for key, definition in definitions.items(): check.invariant( isinstance(definition, definition_class) or callable(definition), "Bad definition for {definition_kind} {key}: must be {definition_class_name} or " "callable, got {type_}".format( definition_kind=definition_kind, key=key, definition_class_name=definition_class_name, type_=type(definition), ), ) self._definition_class: Type[RepositoryLevelDefinition] = definition_class self._definition_class_name = definition_class_name self._definition_kind = definition_kind self._validation_fn: Callable[ [RepositoryLevelDefinition], RepositoryLevelDefinition ] = validation_fn self._definitions: Mapping[ str, Union[RepositoryLevelDefinition, Callable[[], RepositoryLevelDefinition]] ] = definitions self._definition_cache: Dict[str, RepositoryLevelDefinition] = {} self._definition_names: Optional[List[str]] = None self._lazy_definitions_fn: Callable[ [], List[RepositoryLevelDefinition] ] = lazy_definitions_fn or (lambda: []) self._lazy_definitions: Optional[List[RepositoryLevelDefinition]] = None self._all_definitions: Optional[List[RepositoryLevelDefinition]] = None def _get_lazy_definitions(self) -> List[RepositoryLevelDefinition]: if self._lazy_definitions is None: self._lazy_definitions = self._lazy_definitions_fn() for definition in self._lazy_definitions: self._validate_and_cache_definition(definition, definition.name) return self._lazy_definitions def get_definition_names(self) -> List[str]: if self._definition_names: return self._definition_names lazy_names = [] for definition in self._get_lazy_definitions(): strict_definition = self._definitions.get(definition.name) if strict_definition: check.invariant( strict_definition == definition, f"Duplicate definition found for {definition.name}", ) else: lazy_names.append(definition.name) self._definition_names = list(self._definitions.keys()) + lazy_names return self._definition_names def has_definition(self, definition_name: str) -> bool: check.str_param(definition_name, "definition_name") return definition_name in self.get_definition_names() def get_all_definitions(self) -> List[RepositoryLevelDefinition]: if self._all_definitions is not None: return self._all_definitions self._all_definitions = list( sorted( map(self.get_definition, self.get_definition_names()), key=lambda definition: definition.name, ) ) return self._all_definitions def get_definition(self, definition_name: str) -> RepositoryLevelDefinition: check.str_param(definition_name, "definition_name") if not self.has_definition(definition_name): raise DagsterInvariantViolationError( "Could not find {definition_kind} '{definition_name}'. 
Found: " "{found_names}.".format( definition_kind=self._definition_kind, definition_name=definition_name, found_names=", ".join( [ "'{found_name}'".format(found_name=found_name) for found_name in self.get_definition_names() ] ), ) ) if definition_name in self._definition_cache: return self._definition_cache[definition_name] definition_source = self._definitions[definition_name] if isinstance(definition_source, self._definition_class): self._definition_cache[definition_name] = self._validation_fn(definition_source) return definition_source else: definition = cast(Callable, definition_source)() self._validate_and_cache_definition(definition, definition_name) return definition def _validate_and_cache_definition( self, definition: RepositoryLevelDefinition, definition_dict_key: str ): check.invariant( isinstance(definition, self._definition_class), "Bad constructor for {definition_kind} {definition_name}: must return " "{definition_class_name}, got value of type {type_}".format( definition_kind=self._definition_kind, definition_name=definition_dict_key, definition_class_name=self._definition_class_name, type_=type(definition), ), ) check.invariant( definition.name == definition_dict_key, "Bad constructor for {definition_kind} '{definition_name}': name in " "{definition_class_name} does not match: got '{definition_def_name}'".format( definition_kind=self._definition_kind, definition_name=definition_dict_key, definition_class_name=self._definition_class_name, definition_def_name=definition.name, ), ) self._definition_cache[definition_dict_key] = self._validation_fn(definition) class RepositoryData(ABC): """ Users should usually rely on the :py:func:`@repository <repository>` decorator to create new repositories, which will in turn call the static constructors on this class. However, users may subclass :py:class:`RepositoryData` for fine-grained control over access to and lazy creation of repository members. """ @abstractmethod def get_all_pipelines(self) -> List[PipelineDefinition]: """Return all pipelines/jobs in the repository as a list. Returns: List[PipelineDefinition]: All pipelines/jobs in the repository. """ def get_all_jobs(self) -> List[JobDefinition]: """Return all jobs in the repository as a list. Returns: List[JobDefinition]: All jobs in the repository. """ return [job for job in self.get_all_pipelines() if isinstance(job, JobDefinition)] def get_pipeline_names(self) -> List[str]: """Get the names of all pipelines/jobs in the repository. Returns: List[str] """ return [pipeline_def.name for pipeline_def in self.get_all_pipelines()] def get_job_names(self) -> List[str]: """Get the names of all jobs in the repository. Returns: List[str] """ return [job_def.name for job_def in self.get_all_jobs()] def has_pipeline(self, pipeline_name: str) -> bool: """Check if a pipeline/job with a given name is present in the repository. Args: pipeline_name (str): The name of the pipeline/job. Returns: bool """ return pipeline_name in self.get_pipeline_names() def has_job(self, job_name: str) -> bool: """Check if a job with a given name is present in the repository. Args: job_name (str): The name of the job. Returns: bool """ return job_name in self.get_job_names() def get_pipeline(self, pipeline_name) -> PipelineDefinition: """Get a pipeline/job by name. Args: pipeline_name (str): Name of the pipeline/job to retrieve. Returns: PipelineDefinition: The pipeline/job definition corresponding to the given name. 
""" pipelines_with_name = [ pipeline for pipeline in self.get_all_pipelines() if pipeline.name == pipeline_name ] if not pipelines_with_name: raise DagsterInvariantViolationError( f"Could not find pipeline/job {pipeline_name} in repository" ) return pipelines_with_name[0] def get_job(self, job_name: str) -> JobDefinition: """Get a job by name. Args: job_name (str): Name of the job to retrieve. Returns: JobDefinition: The job definition corresponding to the given name. """ match = next(job for job in self.get_all_jobs() if job.name == job_name) if match is None: raise DagsterInvariantViolationError(f"Could not find job {job_name} in repository") return match def get_partition_set_names(self): """Get the names of all partition sets in the repository. Returns: List[str] """ return [partition_set.name for partition_set in self.get_all_partition_sets()] def has_partition_set(self, partition_set_name: str) -> bool: """Check if a partition set with a given name is present in the repository. Args: partition_set_name (str): The name of the partition set. Returns: bool """ return partition_set_name in self.get_partition_set_names() def get_all_partition_sets(self) -> List[PartitionSetDefinition]: """Return all partition sets in the repository as a list. Returns: List[PartitionSetDefinition]: All partition sets in the repository. """ return [] def get_partition_set(self, partition_set_name: str) -> PartitionSetDefinition: """Get a partition set by name. Args: partition_set_name (str): Name of the partition set to retrieve. Returns: PartitionSetDefinition: The partition set definition corresponding to the given name. """ partition_sets_with_name = [ partition_set for partition_set in self.get_all_partition_sets() if partition_set.name == partition_set_name ] if not partition_sets_with_name: raise DagsterInvariantViolationError( f"Could not find partition set {partition_set_name} in repository" ) return partition_sets_with_name[0] def get_schedule_names(self) -> List[str]: """Get the names of all schedules in the repository. Returns: List[str] """ return [schedule.name for schedule in self.get_all_schedules()] def get_all_schedules(self) -> List[ScheduleDefinition]: """Return all schedules in the repository as a list. Returns: List[ScheduleDefinition]: All pipelines in the repository. """ return [] def get_schedule(self, schedule_name: str) -> ScheduleDefinition: """Get a schedule by name. args: schedule_name (str): name of the schedule to retrieve. Returns: ScheduleDefinition: The schedule definition corresponding to the given name. 
""" schedules_with_name = [ schedule for schedule in self.get_all_schedules() if schedule.name == schedule_name ] if not schedules_with_name: raise DagsterInvariantViolationError( f"Could not find schedule {schedule_name} in repository" ) return schedules_with_name[0] def has_schedule(self, schedule_name: str) -> bool: return schedule_name in self.get_schedule_names() def get_all_sensors(self) -> List[SensorDefinition]: return [] def get_sensor_names(self) -> List[str]: return [sensor.name for sensor in self.get_all_sensors()] def get_sensor(self, sensor_name: str) -> SensorDefinition: sensors_with_name = [ sensor for sensor in self.get_all_sensors() if sensor.name == sensor_name ] if not sensors_with_name: raise DagsterInvariantViolationError( f"Could not find sensor {sensor_name} in repository" ) return sensors_with_name[0] def has_sensor(self, sensor_name: str) -> bool: return sensor_name in self.get_sensor_names() def get_source_assets_by_key(self) -> Mapping[AssetKey, SourceAsset]: return {} def load_all_definitions(self): # force load of all lazy constructed code artifacts self.get_all_pipelines() self.get_all_jobs() self.get_all_partition_sets() self.get_all_schedules() self.get_all_sensors() self.get_source_assets_by_key() T = TypeVar("T") Resolvable = Callable[[], T] class CachingRepositoryData(RepositoryData): """Default implementation of RepositoryData used by the :py:func:`@repository <repository>` decorator.""" _all_jobs: Optional[List[JobDefinition]] _all_pipelines: Optional[List[PipelineDefinition]] def __init__( self, pipelines: Mapping[str, Union[PipelineDefinition, Resolvable[PipelineDefinition]]], jobs: Mapping[str, Union[JobDefinition, Resolvable[JobDefinition]]], partition_sets: Mapping[ str, Union[PartitionSetDefinition, Resolvable[PartitionSetDefinition]] ], schedules: Mapping[str, Union[ScheduleDefinition, Resolvable[ScheduleDefinition]]], sensors: Mapping[str, Union[SensorDefinition, Resolvable[SensorDefinition]]], source_assets_by_key: Mapping[AssetKey, SourceAsset], ): """Constructs a new CachingRepositoryData object. You may pass pipeline, job, partition_set, and schedule definitions directly, or you may pass callables with no arguments that will be invoked to lazily construct definitions when accessed by name. This can be helpful for performance when there are many definitions in a repository, or when constructing the definitions is costly. Note that when lazily constructing a definition, the name of the definition must match its key in its dictionary index, or a :py:class:`DagsterInvariantViolationError` will be thrown at retrieval time. Args: pipelines (Mapping[str, Union[PipelineDefinition, Callable[[], PipelineDefinition]]]): The pipeline definitions belonging to the repository. jobs (Mapping[str, Union[JobDefinition, Callable[[], JobDefinition]]]): The job definitions belonging to the repository. partition_sets (Mapping[str, Union[PartitionSetDefinition, Callable[[], PartitionSetDefinition]]]): The partition sets belonging to the repository. schedules (Mapping[str, Union[ScheduleDefinition, Callable[[], ScheduleDefinition]]]): The schedules belonging to the repository. sensors (Mapping[str, Union[SensorDefinition, Callable[[], SensorDefinition]]]): The sensors belonging to a repository. source_assets_by_key (Mapping[AssetKey, SourceAsset]): The source assets belonging to a repository. 
""" check.mapping_param( pipelines, "pipelines", key_type=str, value_type=(PipelineDefinition, FunctionType) ) check.mapping_param(jobs, "jobs", key_type=str, value_type=(JobDefinition, FunctionType)) check.mapping_param( partition_sets, "partition_sets", key_type=str, value_type=(PartitionSetDefinition, FunctionType), ) check.mapping_param( schedules, "schedules", key_type=str, value_type=(ScheduleDefinition, FunctionType) ) check.mapping_param( sensors, "sensors", key_type=str, value_type=(SensorDefinition, FunctionType) ) check.mapping_param( source_assets_by_key, "source_assets_by_key", key_type=AssetKey, value_type=SourceAsset ) self._pipelines = _CacheingDefinitionIndex( PipelineDefinition, "PipelineDefinition", "pipeline", pipelines, self._validate_pipeline, ) self._jobs = _CacheingDefinitionIndex( JobDefinition, "JobDefinition", "job", jobs, self._validate_job, ) self._schedules = _CacheingDefinitionIndex( ScheduleDefinition, "ScheduleDefinition", "schedule", schedules, self._validate_schedule, ) schedule_partition_sets = [ schedule.get_partition_set() for schedule in self._schedules.get_all_definitions() if isinstance(schedule, PartitionScheduleDefinition) ] self._source_assets_by_key = source_assets_by_key def load_partition_sets_from_pipelines() -> List[PartitionSetDefinition]: job_partition_sets = [] for pipeline in self.get_all_pipelines(): if isinstance(pipeline, JobDefinition): job_partition_set = pipeline.get_partition_set_def() if job_partition_set: # should only return a partition set if this was constructed using the job # API, with a partitioned config job_partition_sets.append(job_partition_set) return job_partition_sets self._partition_sets = _CacheingDefinitionIndex( PartitionSetDefinition, "PartitionSetDefinition", "partition set", merge_dicts( {partition_set.name: partition_set for partition_set in schedule_partition_sets}, partition_sets, ), self._validate_partition_set, load_partition_sets_from_pipelines, ) self._sensors = _CacheingDefinitionIndex( SensorDefinition, "SensorDefinition", "sensor", sensors, self._validate_sensor, ) # load all sensors to force validation self._sensors.get_all_definitions() self._all_pipelines = None self._all_jobs = None @staticmethod def from_dict(repository_definitions: Dict[str, Dict[str, Any]]) -> "CachingRepositoryData": """Static constructor. Args: repository_definition (Dict[str, Dict[str, ...]]): A dict of the form: { 'pipelines': Dict[str, Callable[[], PipelineDefinition]], 'jobs': Dict[str, Callable[[], JobDefinition]], 'partition_sets': Dict[str, Callable[[], PartitionSetDefinition]], 'schedules': Dict[str, Callable[[], ScheduleDefinition]] } This form is intended to allow definitions to be created lazily when accessed by name, which can be helpful for performance when there are many definitions in a repository, or when constructing the definitions is costly. 
""" check.dict_param(repository_definitions, "repository_definitions", key_type=str) check.invariant( set(repository_definitions.keys()).issubset(VALID_REPOSITORY_DATA_DICT_KEYS), "Bad dict: must not contain keys other than {{{valid_keys}}}: found {bad_keys}.".format( valid_keys=", ".join( ["'{key}'".format(key=key) for key in VALID_REPOSITORY_DATA_DICT_KEYS] ), bad_keys=", ".join( [ "'{key}'" for key in repository_definitions.keys() if key not in VALID_REPOSITORY_DATA_DICT_KEYS ] ), ), ) for key in VALID_REPOSITORY_DATA_DICT_KEYS: if key not in repository_definitions: repository_definitions[key] = {} duplicate_keys = set(repository_definitions["schedules"].keys()).intersection( set(repository_definitions["sensors"].keys()) ) if duplicate_keys: raise DagsterInvalidDefinitionError( f"Duplicate definitions between schedules and sensors found for keys: {", ".join(duplicate_keys)}" ) # merge jobs in to pipelines while they are just implemented as pipelines for key, job in repository_definitions["jobs"].items(): if key in repository_definitions["pipelines"]: raise DagsterInvalidDefinitionError( f'Conflicting entries for name {key} in "jobs" and "pipelines".' ) if isinstance(job, GraphDefinition): repository_definitions["jobs"][key] = job.coerce_to_job() elif isinstance(job, UnresolvedAssetJobDefinition): repository_definitions["jobs"][key] = job.resolve( # TODO: https://github.com/dagster-io/dagster/issues/8263 assets=[], source_assets=[], ) elif not isinstance(job, JobDefinition) and not isfunction(job): raise DagsterInvalidDefinitionError( f"Object mapped to {key} is not an instance of JobDefinition or GraphDefinition." ) return CachingRepositoryData(**repository_definitions, source_assets_by_key={}) @classmethod def from_list( cls, repository_definitions: List[ Union[ PipelineDefinition, PartitionSetDefinition, ScheduleDefinition, SensorDefinition, "AssetGroup", GraphDefinition, UnresolvedAssetJobDefinition, ] ], default_executor_def: Optional[ExecutorDefinition] = None, ) -> "CachingRepositoryData": """Static constructor. Args: repository_definitions (List[Union[PipelineDefinition, PartitionSetDefinition, ScheduleDefinition, SensorDefinition, AssetGroup, GraphDefinition]]): Use this constructor when you have no need to lazy load pipelines/jobs or other definitions. """ from dagster.core.asset_defs import AssetGroup, AssetsDefinition pipelines_or_jobs: Dict[str, Union[PipelineDefinition, JobDefinition]] = {} coerced_graphs: Dict[str, JobDefinition] = {} unresolved_jobs: Dict[str, UnresolvedAssetJobDefinition] = {} partition_sets: Dict[str, PartitionSetDefinition] = {} schedules: Dict[str, ScheduleDefinition] = {} sensors: Dict[str, SensorDefinition] = {} assets_defs: List[AssetsDefinition] = [] source_assets: List[SourceAsset] = [] combined_asset_group = None for definition in repository_definitions: if isinstance(definition, PipelineDefinition): if ( definition.name in pipelines_or_jobs and pipelines_or_jobs[definition.name] != definition ) or definition.name in unresolved_jobs: raise DagsterInvalidDefinitionError( "Duplicate {target_type} definition found for {target}".format( target_type=definition.target_type, target=definition.describe_target() ) ) if AssetGroup.is_base_job_name(definition.name): raise DagsterInvalidDefinitionError( f"Attempted to provide job called {definition.name} to repository, which " "is a reserved name. Please rename the job." 
) pipelines_or_jobs[definition.name] = definition elif isinstance(definition, PartitionSetDefinition): if definition.name in partition_sets: raise DagsterInvalidDefinitionError( "Duplicate partition set definition found for partition set " "{partition_set_name}".format(partition_set_name=definition.name) ) partition_sets[definition.name] = definition elif isinstance(definition, SensorDefinition): if definition.name in sensors or definition.name in schedules: raise DagsterInvalidDefinitionError( f"Duplicate definition found for {definition.name}" ) sensors[definition.name] = definition elif isinstance(definition, ScheduleDefinition): if definition.name in sensors or definition.name in schedules: raise DagsterInvalidDefinitionError( f"Duplicate definition found for {definition.name}" ) schedules[definition.name] = definition if isinstance(definition, PartitionScheduleDefinition): partition_set_def = definition.get_partition_set() if ( partition_set_def.name in partition_sets and partition_set_def != partition_sets[partition_set_def.name] ): raise DagsterInvalidDefinitionError( "Duplicate partition set definition found for partition set " "{partition_set_name}".format(partition_set_name=partition_set_def.name) ) partition_sets[partition_set_def.name] = partition_set_def elif isinstance(definition, GraphDefinition): coerced = definition.coerce_to_job() if coerced.name in pipelines_or_jobs: raise DagsterInvalidDefinitionError( "Duplicate {target_type} definition found for graph '{name}'".format( target_type=coerced.target_type, name=coerced.name ) ) pipelines_or_jobs[coerced.name] = coerced coerced_graphs[coerced.name] = coerced elif isinstance(definition, UnresolvedAssetJobDefinition): if definition.name in pipelines_or_jobs or definition.name in unresolved_jobs: raise DagsterInvalidDefinitionError( "Duplicate definition found for unresolved job '{name}'".format( name=definition.name ) ) # we can only resolve these once we have all assets unresolved_jobs[definition.name] = definition elif isinstance(definition, AssetGroup): if combined_asset_group: combined_asset_group += definition else: combined_asset_group = definition elif isinstance(definition, AssetsDefinition): assets_defs.append(definition) elif isinstance(definition, SourceAsset): source_assets.append(definition) else: check.failed(f"Unexpected repository entry {definition}") if assets_defs or source_assets: if combined_asset_group is not None: raise DagsterInvalidDefinitionError( "A repository can't have both an AssetGroup and direct asset defs" ) combined_asset_group = AssetGroup( assets=assets_defs, source_assets=source_assets, executor_def=default_executor_def, ) if combined_asset_group: for job_def in combined_asset_group.get_base_jobs(): pipelines_or_jobs[job_def.name] = job_def source_assets_by_key = { source_asset.key: source_asset for source_asset in combined_asset_group.source_assets } else: source_assets_by_key = {} for name, sensor_def in sensors.items(): if sensor_def.has_loadable_targets(): targets = sensor_def.load_targets() for target in targets: _process_and_validate_target( sensor_def, coerced_graphs, unresolved_jobs, pipelines_or_jobs, target ) for name, schedule_def in schedules.items(): if schedule_def.has_loadable_target(): target = schedule_def.load_target() _process_and_validate_target( schedule_def, coerced_graphs, unresolved_jobs, pipelines_or_jobs, target ) # resolve all the UnresolvedAssetJobDefinitions using the full set of assets for name, unresolved_job_def in unresolved_jobs.items(): if not 
combined_asset_group: raise DagsterInvalidDefinitionError( f"UnresolvedAssetJobDefinition {name} specified, but no AssetDefinitions exist " "on the repository." ) resolved_job = unresolved_job_def.resolve( assets=combined_asset_group.assets, source_assets=combined_asset_group.source_assets, ) pipelines_or_jobs[name] = resolved_job pipelines: Dict[str, PipelineDefinition] = {} jobs: Dict[str, JobDefinition] = {} for name, pipeline_or_job in pipelines_or_jobs.items(): if isinstance(pipeline_or_job, JobDefinition): jobs[name] = pipeline_or_job else: pipelines[name] = pipeline_or_job return CachingRepositoryData( pipelines=pipelines, jobs=jobs, partition_sets=partition_sets, schedules=schedules, sensors=sensors, source_assets_by_key=source_assets_by_key, ) def get_pipeline_names(self) -> List[str]: """Get the names of all pipelines/jobs in the repository. Returns: List[str] """ return self._pipelines.get_definition_names() + self.get_job_names() def get_job_names(self) -> List[str]: """Get the names of all jobs in the repository. Returns: List[str] """ return self._jobs.get_definition_names() def has_pipeline(self, pipeline_name: str) -> bool: """Check if a pipeline/job with a given name is present in the repository. Args: pipeline_name (str): The name of the pipeline/job. Returns: bool """ check.str_param(pipeline_name, "pipeline_name") return self._pipelines.has_definition(pipeline_name) or self._jobs.has_definition( pipeline_name ) def has_job(self, job_name: str) -> bool: """Check if a job with a given name is present in the repository. Args: job_name (str): The name of the job. Returns: bool """ check.str_param(job_name, "job_name") return self._jobs.has_definition(job_name) def get_all_pipelines(self) -> List[PipelineDefinition]: """Return all pipelines/jobs in the repository as a list. Note that this will construct any pipeline/job that has not yet been constructed. Returns: List[PipelineDefinition]: All pipelines/jobs in the repository. """ if self._all_pipelines is not None: return self._all_pipelines self._all_jobs = self._jobs.get_all_definitions() pipelines: List[PipelineDefinition] = [ *self._pipelines.get_all_definitions(), *self._all_jobs, ] self._check_solid_defs(pipelines) self._all_pipelines = pipelines return self._all_pipelines def get_all_jobs(self) -> List[JobDefinition]: """Return all jobs in the repository as a list. Note that this will construct any job that has not yet been constructed. Returns: List[JobDefinition]: All jobs in the repository. """ if self._all_jobs is not None: return self._all_jobs # _check_solid_defs enforces that pipeline and graph definition names are # unique within a repository. Loads pipelines in the line below to enforce # pipeline/job/graph uniqueness. self.get_all_pipelines() # The `get_all_pipelines` call ensures _all_jobs is set. return cast(List[JobDefinition], self._all_jobs) def get_pipeline(self, pipeline_name: str) -> PipelineDefinition: """Get a pipeline/job by name. If this pipeline/job has not yet been constructed, only this pipeline/job is constructed, and will be cached for future calls. Args: pipeline_name (str): Name of the pipeline/job to retrieve. Returns: PipelineDefinition: The pipeline/job definition corresponding to the given name. """ check.str_param(pipeline_name, "pipeline_name") if self._jobs.has_definition(pipeline_name): return self._jobs.get_definition(pipeline_name) else: return self._pipelines.get_definition(pipeline_name) def get_job(self, job_name: str) -> JobDefinition: """Get a job by name. 
        If this job has not yet been constructed, only this job is constructed, and will
        be cached for future calls.

        Args:
            job_name (str): Name of the job to retrieve.

        Returns:
            JobDefinition: The job definition corresponding to the given name.
        """
        check.str_param(job_name, "job_name")
        return self._jobs.get_definition(job_name)

    def get_partition_set_names(self) -> List[str]:
        """Get the names of all partition sets in the repository.

        Returns:
            List[str]
        """
        return self._partition_sets.get_definition_names()

    def has_partition_set(self, partition_set_name: str) -> bool:
        """Check if a partition set with a given name is present in the repository.

        Args:
            partition_set_name (str): The name of the partition set.

        Returns:
            bool
        """
        check.str_param(partition_set_name, "partition_set_name")
        return self._partition_sets.has_definition(partition_set_name)

    def get_all_partition_sets(self) -> List[PartitionSetDefinition]:
        """Return all partition sets in the repository as a list.

        Note that this will construct any partition set that has not yet been constructed.

        Returns:
            List[PartitionSetDefinition]: All partition sets in the repository.
        """
        return self._partition_sets.get_all_definitions()

    def get_partition_set(self, partition_set_name: str) -> PartitionSetDefinition:
        """Get a partition set by name.

        If this partition set has not yet been constructed, only this partition set is
        constructed, and will be cached for future calls.

        Args:
            partition_set_name (str): Name of the partition set to retrieve.

        Returns:
            PartitionSetDefinition: The partition set definition corresponding to the given name.
        """
        check.str_param(partition_set_name, "partition_set_name")
        return self._partition_sets.get_definition(partition_set_name)

    def get_schedule_names(self) -> List[str]:
        """Get the names of all schedules in the repository.

        Returns:
            List[str]
        """
        return self._schedules.get_definition_names()

    def get_all_schedules(self) -> List[ScheduleDefinition]:
        """Return all schedules in the repository as a list.

        Note that this will construct any schedule that has not yet been constructed.

        Returns:
            List[ScheduleDefinition]: All schedules in the repository.
        """
        return self._schedules.get_all_definitions()

    def get_schedule(self, schedule_name: str) -> ScheduleDefinition:
        """Get a schedule by name.

        If this schedule has not yet been constructed, only this schedule is constructed,
        and will be cached for future calls.

        Args:
            schedule_name (str): Name of the schedule to retrieve.

        Returns:
            ScheduleDefinition: The schedule definition corresponding to the given name.
""" check.str_param(schedule_name, "schedule_name") return self._schedules.get_definition(schedule_name) def has_schedule(self, schedule_name: str) -> bool: check.str_param(schedule_name, "schedule_name") return self._schedules.has_definition(schedule_name) def get_all_sensors(self) -> List[SensorDefinition]: return self._sensors.get_all_definitions() def get_sensor_names(self) -> List[str]: return self._sensors.get_definition_names() def get_sensor(self, sensor_name: str) -> SensorDefinition: return self._sensors.get_definition(sensor_name) def has_sensor(self, sensor_name: str) -> bool: return self._sensors.has_definition(sensor_name) def get_source_assets_by_key(self) -> Mapping[AssetKey, SourceAsset]: return self._source_assets_by_key def _check_solid_defs(self, pipelines: List[PipelineDefinition]) -> None: solid_defs = {} solid_to_pipeline = {} for pipeline in pipelines: for solid_def in [*pipeline.all_node_defs, pipeline.graph]: # skip checks for subselected graphs because they don't have their own names if isinstance(solid_def, SubselectedGraphDefinition): break if solid_def.name not in solid_defs: solid_defs[solid_def.name] = solid_def solid_to_pipeline[solid_def.name] = pipeline.name if not solid_defs[solid_def.name] is solid_def: first_name, second_name = sorted( [solid_to_pipeline[solid_def.name], pipeline.name] ) raise DagsterInvalidDefinitionError( ( f"Conflicting definitions found in repository with name '{solid_def.name}'. " "Op/Graph/Solid definition names must be unique within a " f"repository. {solid_def.__class__.__name__} is defined in {pipeline.target_type} " f"'{first_name}' and in {pipeline.target_type} '{second_name}'." ) ) def _validate_pipeline(self, pipeline: PipelineDefinition) -> PipelineDefinition: return pipeline def _validate_job(self, job: JobDefinition) -> JobDefinition: return job def _validate_schedule(self, schedule: ScheduleDefinition) -> ScheduleDefinition: pipelines = self.get_pipeline_names() if schedule.pipeline_name not in pipelines: raise DagsterInvalidDefinitionError( f'ScheduleDefinition "{schedule.name}" targets job/pipeline "{schedule.pipeline_name}" ' "which was not found in this repository." ) return schedule def _validate_sensor(self, sensor: SensorDefinition) -> SensorDefinition: pipelines = self.get_pipeline_names() if len(sensor.targets) == 0: # skip validation when the sensor does not target a pipeline return sensor for target in sensor.targets: if target.pipeline_name not in pipelines: raise DagsterInvalidDefinitionError( f'SensorDefinition "{sensor.name}" targets job/pipeline "{sensor.pipeline_name}" ' "which was not found in this repository." ) return sensor def _validate_partition_set( self, partition_set: PartitionSetDefinition ) -> PartitionSetDefinition: return partition_set class RepositoryDefinition: """Define a repository that contains a group of definitions. Users should typically not create objects of this class directly. Instead, use the :py:func:`@repository` decorator. Args: name (str): The name of the repository. repository_data (RepositoryData): Contains the definitions making up the repository. description (Optional[str]): A string description of the repository. 
""" def __init__( self, name, repository_data, description=None, ): self._name = check_valid_name(name) self._description = check.opt_str_param(description, "description") self._repository_data = check.inst_param(repository_data, "repository_data", RepositoryData) @property def name(self) -> str: return self._name @property def description(self) -> Optional[str]: return self._description def load_all_definitions(self): # force load of all lazy constructed code artifacts self._repository_data.load_all_definitions() @property def pipeline_names(self) -> List[str]: """List[str]: Names of all pipelines/jobs in the repository""" return self._repository_data.get_pipeline_names() @property def job_names(self) -> List[str]: """List[str]: Names of all jobs in the repository""" return self._repository_data.get_job_names() def has_pipeline(self, name: str) -> bool: """Check if a pipeline/job with a given name is present in the repository. Args: name (str): The name of the pipeline/job. Returns: bool """ return self._repository_data.has_pipeline(name) def get_pipeline(self, name: str) -> PipelineDefinition: """Get a pipeline/job by name. If this pipeline/job is present in the lazily evaluated dictionary passed to the constructor, but has not yet been constructed, only this pipeline/job is constructed, and will be cached for future calls. Args: name (str): Name of the pipeline/job to retrieve. Returns: PipelineDefinition: The pipeline/job definition corresponding to the given name. """ return self._repository_data.get_pipeline(name) def get_all_pipelines(self) -> List[PipelineDefinition]: """Return all pipelines/jobs in the repository as a list. Note that this will construct any pipeline/job in the lazily evaluated dictionary that has not yet been constructed. Returns: List[PipelineDefinition]: All pipelines/jobs in the repository. """ return self._repository_data.get_all_pipelines() def has_job(self, name: str) -> bool: """Check if a job with a given name is present in the repository. Args: name (str): The name of the job. Returns: bool """ return self._repository_data.has_job(name) def get_job(self, name: str) -> JobDefinition: """Get a job by name. If this job is present in the lazily evaluated dictionary passed to the constructor, but has not yet been constructed, only this job is constructed, and will be cached for future calls. Args: name (str): Name of the job to retrieve. Returns: JobDefinition: The job definition corresponding to the given name. """ return self._repository_data.get_job(name) def get_all_jobs(self) -> List[JobDefinition]: """Return all jobs in the repository as a list. Note that this will construct any job in the lazily evaluated dictionary that has not yet been constructed. Returns: List[JobDefinition]: All jobs in the repository. 
""" return self._repository_data.get_all_jobs() @property def partition_set_defs(self) -> List[PartitionSetDefinition]: return self._repository_data.get_all_partition_sets() def get_partition_set_def(self, name: str) -> PartitionSetDefinition: return self._repository_data.get_partition_set(name) @property def schedule_defs(self) -> List[ScheduleDefinition]: return self._repository_data.get_all_schedules() def get_schedule_def(self, name: str) -> ScheduleDefinition: return self._repository_data.get_schedule(name) def has_schedule_def(self, name: str) -> bool: return self._repository_data.has_schedule(name) @property def sensor_defs(self) -> List[SensorDefinition]: return self._repository_data.get_all_sensors() def get_sensor_def(self, name: str) -> SensorDefinition: return self._repository_data.get_sensor(name) def has_sensor_def(self, name: str) -> bool: return self._repository_data.has_sensor(name) @property def source_assets_by_key(self) -> Dict[AssetKey, SourceAsset]: return self._repository_data.get_source_assets_by_key() # If definition comes from the @repository decorator, then the __call__ method will be # overwritten. Therefore, we want to maintain the call-ability of repository definitions. def __call__(self, *args, **kwargs): return self def _process_and_validate_target( schedule_or_sensor_def: Union[SensorDefinition, ScheduleDefinition], coerced_graphs: Dict[str, JobDefinition], unresolved_jobs: Dict[str, UnresolvedAssetJobDefinition], pipelines_or_jobs: Dict[str, PipelineDefinition], target: Union[GraphDefinition, PipelineDefinition, UnresolvedAssetJobDefinition], ): # This function modifies the state of coerced_graphs and unresolved_jobs targeter = ( f"schedule '{schedule_or_sensor_def.name}'" if isinstance(schedule_or_sensor_def, ScheduleDefinition) else f"sensor '{schedule_or_sensor_def.name}'" ) if isinstance(target, GraphDefinition): if target.name not in coerced_graphs: # Since this is a graph we have to coerce, it is not possible to be # the same definition by reference equality if target.name in pipelines_or_jobs: dupe_target_type = pipelines_or_jobs[target.name].target_type raise DagsterInvalidDefinitionError( _get_error_msg_for_target_conflict( targeter, "graph", target.name, dupe_target_type ) ) elif coerced_graphs[target.name].graph != target: raise DagsterInvalidDefinitionError( _get_error_msg_for_target_conflict(targeter, "graph", target.name, "graph") ) coerced_job = target.coerce_to_job() coerced_graphs[target.name] = coerced_job pipelines_or_jobs[target.name] = coerced_job elif isinstance(target, UnresolvedAssetJobDefinition): if target.name not in unresolved_jobs: # Since this is am unresolved job we have to resolve, it is not possible to # be the same definition by reference equality if target.name in pipelines_or_jobs: dupe_target_type = pipelines_or_jobs[target.name].target_type raise DagsterInvalidDefinitionError( _get_error_msg_for_target_conflict( targeter, "unresolved asset job", target.name, dupe_target_type ) ) elif unresolved_jobs[target.name].selection != target.selection: raise DagsterInvalidDefinitionError( _get_error_msg_for_target_conflict( targeter, "unresolved asset job", target.name, "unresolved asset job" ) ) unresolved_jobs[target.name] = target else: if target.name in pipelines_or_jobs and pipelines_or_jobs[target.name] != target: dupe_target_type = ( "graph" if target.name in coerced_graphs else "unresolved asset job" if target.name in unresolved_jobs else pipelines_or_jobs[target.name].target_type ) raise DagsterInvalidDefinitionError( 
_get_error_msg_for_target_conflict( targeter, target.target_type, target.name, dupe_target_type ) ) pipelines_or_jobs[target.name] = target def _get_error_msg_for_target_conflict(targeter, target_type, target_name, dupe_target_type): return f"{targeter} targets {target_type} '{target_name}', but a different {dupe_target_type} with the same name was provided. Disambiguate between these by providing a separate name to one of them."
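
# --- Illustrative sketch (editor's addition, not part of the dagster source) --
# _CacheingDefinitionIndex above accepts a mapping whose values are either
# concrete definitions or zero-argument callables, and constructs and caches
# each definition the first time it is requested by name. Stripped of
# dagster's types and validation hooks, the core pattern reduces to the
# hypothetical LazyIndex below.


class LazyIndex:
    """Name-keyed index that lazily constructs and caches its values."""

    def __init__(self, definitions):
        # values are either already-built objects or zero-argument factories
        self._definitions = dict(definitions)
        self._cache = {}

    def get(self, name):
        if name in self._cache:
            return self._cache[name]
        source = self._definitions[name]
        # call the factory on first access; otherwise use the value directly
        value = source() if callable(source) else source
        self._cache[name] = value
        return value


if __name__ == "__main__":
    def _build_lazy_value():
        print("constructing...")  # runs exactly once
        return 99

    index = LazyIndex({"eager": 42, "lazy": _build_lazy_value})
    assert index.get("eager") == 42
    assert index.get("lazy") == 99  # prints "constructing..."
    assert index.get("lazy") == 99  # cached; factory is not called again

# A from_dict() call using the lazy form documented above would look roughly
# like (hypothetical; build_my_job is a user-supplied JobDefinition factory):
#     CachingRepositoryData.from_dict({"jobs": {"my_job": lambda: build_my_job()}})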
combined_asset_group: raise DagsterInvalidDefinitionError( f"UnresolvedAssetJobDefinition {name} specified, but no AssetDefinitions exist " "on the repository." ) resolved_job = unresolved_job_def.resolve( assets=combined_asset_group.assets, source_assets=combined_asset_group.source_assets, ) pipelines_or_jobs[name] = resolved_job pipelines: Dict[str, PipelineDefinition] = {} jobs: Dict[str, JobDefinition] = {} for name, pipeline_or_job in pipelines_or_jobs.items(): if isinstance(pipeline_or_job, JobDefinition): jobs[name] = pipeline_or_job else: pipelines[name] = pipeline_or_job return CachingRepositoryData( pipelines=pipelines, jobs=jobs, partition_sets=partition_sets, schedules=schedules, sensors=sensors, source_assets_by_key=source_assets_by_key, ) def get_pipeline_names(self) -> List[str]: """Get the names of all pipelines/jobs in the repository. Returns: List[str] """ return self._pipelines.get_definition_names() + self.get_job_names() def get_job_names(self) -> List[str]: """Get the names of all jobs in the repository. Returns: List[str] """ return self._jobs.get_definition_names() def has_pipeline(self, pipeline_name: str) -> bool: """Check if a pipeline/job with a given name is present in the repository. Args: pipeline_name (str): The name of the pipeline/job. Returns: bool """ check.str_param(pipeline_name, "pipeline_name") return self._pipelines.has_definition(pipeline_name) or self._jobs.has_definition( pipeline_name ) def has_job(self, job_name: str) -> bool: """Check if a job with a given name is present in the repository. Args: job_name (str): The name of the job. Returns: bool """ check.str_param(job_name, "job_name") return self._jobs.has_definition(job_name) def get_all_pipelines(self) -> List[PipelineDefinition]: """Return all pipelines/jobs in the repository as a list. Note that this will construct any pipeline/job that has not yet been constructed. Returns: List[PipelineDefinition]: All pipelines/jobs in the repository. """ if self._all_pipelines is not None: return self._all_pipelines self._all_jobs = self._jobs.get_all_definitions() pipelines: List[PipelineDefinition] = [ *self._pipelines.get_all_definitions(), *self._all_jobs, ] self._check_solid_defs(pipelines) self._all_pipelines = pipelines return self._all_pipelines def get_all_jobs(self) -> List[JobDefinition]: """Return all jobs in the repository as a list. Note that this will construct any job that has not yet been constructed. Returns: List[JobDefinition]: All jobs in the repository. """ if self._all_jobs is not None: return self._all_jobs # _check_solid_defs enforces that pipeline and graph definition names are # unique within a repository. Loads pipelines in the line below to enforce # pipeline/job/graph uniqueness. self.get_all_pipelines() # The `get_all_pipelines` call ensures _all_jobs is set. return cast(List[JobDefinition], self._all_jobs) def get_pipeline(self, pipeline_name: str) -> PipelineDefinition: """Get a pipeline/job by name. If this pipeline/job has not yet been constructed, only this pipeline/job is constructed, and will be cached for future calls. Args: pipeline_name (str): Name of the pipeline/job to retrieve. Returns: PipelineDefinition: The pipeline/job definition corresponding to the given name. """ check.str_param(pipeline_name, "pipeline_name") if self._jobs.has_definition(pipeline_name): return self._jobs.get_definition(pipeline_name) else: return self._pipelines.get_definition(pipeline_name) def get_job(self, job_name: str) -> JobDefinition: """Get a job by name. 
        If this job has not yet been constructed, only this job is constructed, and will
        be cached for future calls.

        Args:
            job_name (str): Name of the job to retrieve.

        Returns:
            JobDefinition: The job definition corresponding to the given name.
        """
        check.str_param(job_name, "job_name")
        return self._jobs.get_definition(job_name)

    def get_partition_set_names(self) -> List[str]:
        """Get the names of all partition sets in the repository.

        Returns:
            List[str]
        """
        return self._partition_sets.get_definition_names()

    def has_partition_set(self, partition_set_name: str) -> bool:
        """Check if a partition set with a given name is present in the repository.

        Args:
            partition_set_name (str): The name of the partition set.

        Returns:
            bool
        """
        check.str_param(partition_set_name, "partition_set_name")
        return self._partition_sets.has_definition(partition_set_name)

    def get_all_partition_sets(self) -> List[PartitionSetDefinition]:
        """Return all partition sets in the repository as a list.

        Note that this will construct any partition set that has not yet been constructed.

        Returns:
            List[PartitionSetDefinition]: All partition sets in the repository.
        """
        return self._partition_sets.get_all_definitions()

    def get_partition_set(self, partition_set_name: str) -> PartitionSetDefinition:
        """Get a partition set by name.

        If this partition set has not yet been constructed, only this partition set is
        constructed, and will be cached for future calls.

        Args:
            partition_set_name (str): Name of the partition set to retrieve.

        Returns:
            PartitionSetDefinition: The partition set definition corresponding to the given name.
        """
        check.str_param(partition_set_name, "partition_set_name")
        return self._partition_sets.get_definition(partition_set_name)

    def get_schedule_names(self) -> List[str]:
        """Get the names of all schedules in the repository.

        Returns:
            List[str]
        """
        return self._schedules.get_definition_names()

    def get_all_schedules(self) -> List[ScheduleDefinition]:
        """Return all schedules in the repository as a list.

        Note that this will construct any schedule that has not yet been constructed.

        Returns:
            List[ScheduleDefinition]: All schedules in the repository.
        """
        return self._schedules.get_all_definitions()

    def get_schedule(self, schedule_name: str) -> ScheduleDefinition:
        """Get a schedule by name.

        If this schedule has not yet been constructed, only this schedule is constructed,
        and will be cached for future calls.

        Args:
            schedule_name (str): Name of the schedule to retrieve.

        Returns:
            ScheduleDefinition: The schedule definition corresponding to the given name.
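        Example (sketch; assumes the repository was built with a schedule named
        "my_schedule"):

            schedule = repository_data.get_schedule("my_schedule")  # built on first access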
""" check.str_param(schedule_name, "schedule_name") return self._schedules.get_definition(schedule_name) def has_schedule(self, schedule_name: str) -> bool: check.str_param(schedule_name, "schedule_name") return self._schedules.has_definition(schedule_name) def get_all_sensors(self) -> List[SensorDefinition]: return self._sensors.get_all_definitions() def get_sensor_names(self) -> List[str]: return self._sensors.get_definition_names() def get_sensor(self, sensor_name: str) -> SensorDefinition: return self._sensors.get_definition(sensor_name) def has_sensor(self, sensor_name: str) -> bool: return self._sensors.has_definition(sensor_name) def get_source_assets_by_key(self) -> Mapping[AssetKey, SourceAsset]: return self._source_assets_by_key def _check_solid_defs(self, pipelines: List[PipelineDefinition]) -> None: solid_defs = {} solid_to_pipeline = {} for pipeline in pipelines: for solid_def in [*pipeline.all_node_defs, pipeline.graph]: # skip checks for subselected graphs because they don't have their own names if isinstance(solid_def, SubselectedGraphDefinition): break if solid_def.name not in solid_defs: solid_defs[solid_def.name] = solid_def solid_to_pipeline[solid_def.name] = pipeline.name if not solid_defs[solid_def.name] is solid_def: first_name, second_name = sorted( [solid_to_pipeline[solid_def.name], pipeline.name] ) raise DagsterInvalidDefinitionError( ( f"Conflicting definitions found in repository with name '{solid_def.name}'. " "Op/Graph/Solid definition names must be unique within a " f"repository. {solid_def.__class__.__name__} is defined in {pipeline.target_type} " f"'{first_name}' and in {pipeline.target_type} '{second_name}'." ) ) def _validate_pipeline(self, pipeline: PipelineDefinition) -> PipelineDefinition: return pipeline def _validate_job(self, job: JobDefinition) -> JobDefinition: return job def _validate_schedule(self, schedule: ScheduleDefinition) -> ScheduleDefinition: pipelines = self.get_pipeline_names() if schedule.pipeline_name not in pipelines: raise DagsterInvalidDefinitionError( f'ScheduleDefinition "{schedule.name}" targets job/pipeline "{schedule.pipeline_name}" ' "which was not found in this repository." ) return schedule def _validate_sensor(self, sensor: SensorDefinition) -> SensorDefinition: pipelines = self.get_pipeline_names() if len(sensor.targets) == 0: # skip validation when the sensor does not target a pipeline return sensor for target in sensor.targets: if target.pipeline_name not in pipelines: raise DagsterInvalidDefinitionError( f'SensorDefinition "{sensor.name}" targets job/pipeline "{sensor.pipeline_name}" ' "which was not found in this repository." ) return sensor def _validate_partition_set( self, partition_set: PartitionSetDefinition ) -> PartitionSetDefinition: return partition_set class RepositoryDefinition: """Define a repository that contains a group of definitions. Users should typically not create objects of this class directly. Instead, use the :py:func:`@repository` decorator. Args: name (str): The name of the repository. repository_data (RepositoryData): Contains the definitions making up the repository. description (Optional[str]): A string description of the repository. 
""" def __init__( self, name, repository_data, description=None, ): self._name = check_valid_name(name) self._description = check.opt_str_param(description, "description") self._repository_data = check.inst_param(repository_data, "repository_data", RepositoryData) @property def name(self) -> str: return self._name @property def description(self) -> Optional[str]: return self._description def load_all_definitions(self): # force load of all lazy constructed code artifacts self._repository_data.load_all_definitions() @property def pipeline_names(self) -> List[str]: """List[str]: Names of all pipelines/jobs in the repository""" return self._repository_data.get_pipeline_names() @property def job_names(self) -> List[str]: """List[str]: Names of all jobs in the repository""" return self._repository_data.get_job_names() def has_pipeline(self, name: str) -> bool: """Check if a pipeline/job with a given name is present in the repository. Args: name (str): The name of the pipeline/job. Returns: bool """ return self._repository_data.has_pipeline(name) def get_pipeline(self, name: str) -> PipelineDefinition: """Get a pipeline/job by name. If this pipeline/job is present in the lazily evaluated dictionary passed to the constructor, but has not yet been constructed, only this pipeline/job is constructed, and will be cached for future calls. Args: name (str): Name of the pipeline/job to retrieve. Returns: PipelineDefinition: The pipeline/job definition corresponding to the given name. """ return self._repository_data.get_pipeline(name) def get_all_pipelines(self) -> List[PipelineDefinition]: """Return all pipelines/jobs in the repository as a list. Note that this will construct any pipeline/job in the lazily evaluated dictionary that has not yet been constructed. Returns: List[PipelineDefinition]: All pipelines/jobs in the repository. """ return self._repository_data.get_all_pipelines() def has_job(self, name: str) -> bool: """Check if a job with a given name is present in the repository. Args: name (str): The name of the job. Returns: bool """ return self._repository_data.has_job(name) def get_job(self, name: str) -> JobDefinition: """Get a job by name. If this job is present in the lazily evaluated dictionary passed to the constructor, but has not yet been constructed, only this job is constructed, and will be cached for future calls. Args: name (str): Name of the job to retrieve. Returns: JobDefinition: The job definition corresponding to the given name. """ return self._repository_data.get_job(name) def get_all_jobs(self) -> List[JobDefinition]: """Return all jobs in the repository as a list. Note that this will construct any job in the lazily evaluated dictionary that has not yet been constructed. Returns: List[JobDefinition]: All jobs in the repository. 
""" return self._repository_data.get_all_jobs() @property def partition_set_defs(self) -> List[PartitionSetDefinition]: return self._repository_data.get_all_partition_sets() def get_partition_set_def(self, name: str) -> PartitionSetDefinition: return self._repository_data.get_partition_set(name) @property def schedule_defs(self) -> List[ScheduleDefinition]: return self._repository_data.get_all_schedules() def get_schedule_def(self, name: str) -> ScheduleDefinition: return self._repository_data.get_schedule(name) def has_schedule_def(self, name: str) -> bool: return self._repository_data.has_schedule(name) @property def sensor_defs(self) -> List[SensorDefinition]: return self._repository_data.get_all_sensors() def get_sensor_def(self, name: str) -> SensorDefinition: return self._repository_data.get_sensor(name) def has_sensor_def(self, name: str) -> bool: return self._repository_data.has_sensor(name) @property def source_assets_by_key(self) -> Dict[AssetKey, SourceAsset]: return self._repository_data.get_source_assets_by_key() # If definition comes from the @repository decorator, then the __call__ method will be # overwritten. Therefore, we want to maintain the call-ability of repository definitions. def __call__(self, *args, **kwargs): return self def _process_and_validate_target( schedule_or_sensor_def: Union[SensorDefinition, ScheduleDefinition], coerced_graphs: Dict[str, JobDefinition], unresolved_jobs: Dict[str, UnresolvedAssetJobDefinition], pipelines_or_jobs: Dict[str, PipelineDefinition], target: Union[GraphDefinition, PipelineDefinition, UnresolvedAssetJobDefinition], ): # This function modifies the state of coerced_graphs and unresolved_jobs targeter = ( f"schedule '{schedule_or_sensor_def.name}'" if isinstance(schedule_or_sensor_def, ScheduleDefinition) else f"sensor '{schedule_or_sensor_def.name}'" ) if isinstance(target, GraphDefinition): if target.name not in coerced_graphs: # Since this is a graph we have to coerce, it is not possible to be # the same definition by reference equality if target.name in pipelines_or_jobs: dupe_target_type = pipelines_or_jobs[target.name].target_type raise DagsterInvalidDefinitionError( _get_error_msg_for_target_conflict( targeter, "graph", target.name, dupe_target_type ) ) elif coerced_graphs[target.name].graph != target: raise DagsterInvalidDefinitionError( _get_error_msg_for_target_conflict(targeter, "graph", target.name, "graph") ) coerced_job = target.coerce_to_job() coerced_graphs[target.name] = coerced_job pipelines_or_jobs[target.name] = coerced_job elif isinstance(target, UnresolvedAssetJobDefinition): if target.name not in unresolved_jobs: # Since this is am unresolved job we have to resolve, it is not possible to # be the same definition by reference equality if target.name in pipelines_or_jobs: dupe_target_type = pipelines_or_jobs[target.name].target_type raise DagsterInvalidDefinitionError( _get_error_msg_for_target_conflict( targeter, "unresolved asset job", target.name, dupe_target_type ) ) elif unresolved_jobs[target.name].selection != target.selection: raise DagsterInvalidDefinitionError( _get_error_msg_for_target_conflict( targeter, "unresolved asset job", target.name, "unresolved asset job" ) ) unresolved_jobs[target.name] = target else: if target.name in pipelines_or_jobs and pipelines_or_jobs[target.name] != target: dupe_target_type = ( "graph" if target.name in coerced_graphs else "unresolved asset job" if target.name in unresolved_jobs else pipelines_or_jobs[target.name].target_type ) raise DagsterInvalidDefinitionError( 
_get_error_msg_for_target_conflict( targeter, target.target_type, target.name, dupe_target_type ) ) pipelines_or_jobs[target.name] = target def _get_error_msg_for_target_conflict(targeter, target_type, target_name, dupe_target_type): return f"{targeter} targets {target_type} '{target_name}', but a different {dupe_target_type} with the same name was provided. Disambiguate between these by providing a separate name to one of them."
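
# Illustrative sketch (not part of this module's public surface): how the pieces
# above compose. `my_job` and `my_schedule` stand in for real definitions built
# elsewhere with the usual dagster decorators.
#
#     repo_data = CachingRepositoryData.from_list([my_job, my_schedule])
#     repo = RepositoryDefinition("my_repo", repository_data=repo_data)
#     assert repo.has_job("my_job")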
# -*- coding: utf-8 -*- import torch import numpy as np import contextlib import gc import io import inspect import itertools import math import random import re import copy import os import tempfile import unittest import warnings import types import pickle import textwrap import subprocess import weakref import sys from torch.utils.dlpack import from_dlpack, to_dlpack from torch._six import inf, nan, string_classes from itertools import product, combinations, permutations from functools import partial from torch import multiprocessing as mp from torch.testing._internal.common_utils import ( TestCase, TEST_WITH_ROCM, run_tests, IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN, do_test_dtypes, IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, slowTest, skipCUDAMemoryLeakCheckIf, BytesIOContext, noarchTest, skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName, wrapDeterministicFlagAPITest, DeterministicGuard, make_tensor) from multiprocessing.reduction import ForkingPickler from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, skipCUDAVersionIn, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast, skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyOnCPUAndCUDA, expectedAlertNondeterministic) from typing import Dict, List, Tuple import torch.backends.quantized import torch.testing._internal.data from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32 # Protects against includes accidentally setting the default dtype assert torch.get_default_dtype() is torch.float32 # load_tests from torch.testing._internal.common_utils is used to automatically filter tests for # sharding on sandcastle. This line silences flake warnings load_tests = load_tests AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() # Wrap base test class into a class to hide it from testing # See https://stackoverflow.com/a/25695512 class AbstractTestCases: # This is intentionally prefixed by an underscore. Otherwise pytest will try to # run its methods as test cases. 
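    # A concrete test class is expected to mix this in, along the lines of the
    # following sketch (the real instantiation happens elsewhere in this file):
    #
    #     class TestTorch(AbstractTestCases._TestTorchMixin):
    #         pass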
class _TestTorchMixin(TestCase): def _make_tensors(self, shape, val_range=(-100, 100), use_floating=True, use_integral=True, use_complex=False) -> Dict[str, List[torch.Tensor]]: float_types = [torch.double, torch.float] int_types = [torch.int64, torch.int32, torch.int16] complex_types = [torch.complex64, torch.complex128] def make_contiguous(shape, dtype) -> torch.Tensor: if dtype in float_types: val = torch.randn(shape, dtype=dtype) val = val * ((val_range[1] - val_range[0]) / (math.pi * 2.0)) val = val + ((val_range[1] - val_range[0]) / 2.0) val = torch.clamp(val, min=val_range[0], max=val_range[1]) return val result = torch.zeros(shape, dtype=dtype) result.apply_(lambda x: random.randint(val_range[0], val_range[1])) return result def make_non_contiguous(shape, dtype) -> torch.Tensor: contig = make_contiguous(shape, dtype) non_contig = torch.empty(shape + (2, 2), dtype=dtype)[..., 0] non_contig = non_contig.select(-1, -1) non_contig.copy_(contig) self.assertFalse(non_contig.is_contiguous()) return non_contig def make_contiguous_slice(size, dtype) -> torch.Tensor: contig = make_contiguous((1, size), dtype) non_contig = contig[:1, 1:size - 1] self.assertTrue(non_contig.is_contiguous()) return contig types = [] if use_floating: types += float_types if use_integral: types += int_types if use_complex: types += complex_types tensors: Dict[str, List[torch.Tensor]] = {"cont": [], "noncont": [], "slice": []} for dtype in types: tensors["cont"].append(make_contiguous(shape, dtype)) tensors["noncont"].append(make_non_contiguous(shape, dtype)) tensors["slice"].append(make_contiguous_slice(sum(list(shape)), dtype)) return tensors def test_dir(self): dir(torch) @wrapDeterministicFlagAPITest def test_deterministic_flag(self): for deterministic in [True, False]: torch.use_deterministic_algorithms(deterministic) self.assertEqual(deterministic, torch.are_deterministic_algorithms_enabled()) with self.assertRaisesRegex(RuntimeError, r"use_deterministic_algorithms expects a bool, but got int"): torch.use_deterministic_algorithms(1) def test_type_conversion_via_dtype_name(self): x = torch.tensor([1]) self.assertEqual(x.byte().dtype, torch.uint8) self.assertEqual(x.bool().dtype, torch.bool) self.assertEqual(x.char().dtype, torch.int8) self.assertEqual(x.double().dtype, torch.float64) self.assertEqual(x.float().dtype, torch.float32) self.assertEqual(x.half().dtype, torch.float16) self.assertEqual(x.int().dtype, torch.int32) self.assertEqual(x.bfloat16().dtype, torch.bfloat16) cfloat = x.cfloat() self.assertEqual(cfloat.dtype, torch.complex64) self.assertEqual(cfloat.real, x.float()) self.assertEqual(cfloat.imag, torch.zeros_like(cfloat.imag)) cdouble = x.cdouble() self.assertEqual(cdouble.dtype, torch.complex128) self.assertEqual(cdouble.real, x.double()) self.assertEqual(cdouble.imag, torch.zeros_like(cdouble.imag)) def test_doc_template(self) -> None: from torch._torch_docs import __file__ as doc_file from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args with open(doc_file, "r", encoding="utf-8") as f: doc_strs = f.read() for doc_str in re.findall(r'add_docstr\((.*?),.*?("""|\'\'\')(.*?)("""|\'\'\')\)', doc_strs, re.MULTILINE | re.DOTALL): for common_args in [multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args]: for k, v in common_args.items(): self.assertNotIn(v, doc_str[2], 'The argument description "{}" in {} can be ' 'replaced by {{{}}}'.format(v, doc_str[0], k)) def test_doc(self): checked_types = (types.MethodType, 
types.FunctionType, types.BuiltinFunctionType, types.BuiltinMethodType) def test_namespace(ns, *skips): if isinstance(ns, object): ns_name = ns.__class__.__name__ else: ns_name = ns.__name__ skip_regexes = [] for r in skips: if isinstance(r, string_classes): skip_regexes.append(re.compile('^{}$'.format(re.escape(r)))) else: skip_regexes.append(r) for name in dir(ns): if name.startswith('_'): continue if name in ['real', 'imag']: y = torch.randn(1, dtype=torch.cfloat) var = getattr(y, name) else: var = getattr(ns, name) if not isinstance(var, checked_types): continue doc = var.__doc__ has_doc = doc is not None and len(doc.strip()) > 0 full_name = ns_name + '.' + name if any(r.match(name) for r in skip_regexes): self.assertFalse(has_doc, 'New docs have been added for {}, please remove ' 'it from the skipped list in TestTorch.test_doc'.format(full_name)) else: self.assertTrue(has_doc, '{} is missing documentation'.format(full_name)) # FIXME: All of the following should be marked as expected failures # so that it is easier to tell when missing has been added. # FIXME: fix all the skipped ones below! test_namespace(torch.randn(1), 'as_strided_', re.compile('^clamp_(min|max)_?$'), 'is_distributed', 'is_nonzero', 'is_same_size', 'log_softmax', 'map2_', 'new', 'reinforce', 'relu', 'relu_', 'prelu', 'resize', 'resize_as', 'softmax', 'split_with_sizes', 'unsafe_split_with_sizes', ) test_namespace(torch.nn) test_namespace(torch.nn.functional, 'assert_int_or_pair') # TODO: add torch.* tests when we have proper namespacing on ATen functions # test_namespace(torch) def test_msnpu_error(self): with self.assertRaisesRegex(RuntimeError, "Could not run 'aten::empty.memory_format' with arguments from the 'MSNPU' backend"): torch.zeros(1, device=torch.device('msnpu')) def test_has_storage(self): self.assertIsNotNone(torch.tensor([]).storage()) self.assertIsNotNone(torch.empty(0).storage()) self.assertIsNotNone(torch.tensor([]).clone().storage()) self.assertIsNotNone(torch.tensor([0, 0, 0]).nonzero().storage()) self.assertIsNotNone(torch.tensor([]).new().storage()) def test_where_invalid_device(self): if torch.cuda.is_available(): for devices in [('cpu', 'cuda', 'cuda'), ('cuda', 'cpu', 'cpu'), ('cuda', 'cpu', 'cuda'), ('cpu', 'cuda', 'cpu')]: condition = torch.rand(16, device=devices[0]) x = torch.rand(16, device=devices[1]) y = torch.rand(16, device=devices[2]) with self.assertRaisesRegex(RuntimeError, "Expected condition, x and y to be on the same device"): torch.where(condition, x, y) def test_where_bool_tensor(self): for d in torch.testing.get_all_device_types(): a = torch.tensor([True, False], device=d) res = torch.where(a > 0) self.assertEqual(1, len(res)) def test_where_tensor(self): def rand_tensor(size, dtype, device): if dtype.is_floating_point or dtype.is_complex: return torch.rand(size=size, dtype=dtype, device=device) elif dtype == torch.uint8: return torch.randint(1, 5, size=size, dtype=dtype, device=device) elif dtype == torch.bool: return torch.randint(0, 1, size=size, dtype=dtype, device=device).bool() else: return torch.randint(-5, 5, size=size, dtype=dtype, device=device) def get_tensor(size, dtype, device, contiguous): if not contiguous and len(size) < 2: raise RuntimeError("Unable to generate non contiguous tensor with size < 2") t = rand_tensor(size, dtype, device) if contiguous: return t else: return t.transpose(0, 1) height = 5 width = 5 for device in torch.testing.get_all_device_types(): for dt1 in torch.testing.get_all_dtypes(): for dt2 in torch.testing.get_all_dtypes(): for 
contiguous in [True, False]: x1 = get_tensor((height, width), dt1, device, contiguous) x2 = get_tensor((height, width), dt2, device, contiguous) if dt1 != dt2: self.assertRaisesRegex(RuntimeError, "expected scalar type", lambda: torch.where(x1 == 1, x1, x2)) else: if x1.is_floating_point(): condition = (x1 < 0.5) elif x1.is_complex(): condition = (x1.abs() < 0.5) else: condition = (x1 == 1) expected = condition.to(x1.dtype) * x1 + (~condition).to(x2.dtype) * x2 result = torch.where(condition, x1, x2) self.assertEqual(expected, result) def test_dtypes(self): all_dtypes = torch.testing.get_all_dtypes() do_test_dtypes(self, all_dtypes, torch.strided, torch.device('cpu')) if torch.cuda.is_available(): all_dtypes.remove(torch.bfloat16) # Remove once _th_zero_ is enabled on cuda for bfloat16 do_test_dtypes(self, all_dtypes, torch.strided, torch.device('cuda:0')) def test_copy_dtypes(self): all_dtypes = torch.testing.get_all_dtypes() for dtype in all_dtypes: copied_dtype = copy.deepcopy(dtype) self.assertIs(dtype, copied_dtype) def test_copy_transpose(self): x = torch.arange(100 * 100, dtype=torch.float).reshape(100, 100).t() y = torch.empty(100, 100, dtype=torch.float) y.copy_(x) self.assertEqual(y[:, 0], range(100)) self.assertEqual(y[:, 40], range(4000, 4100)) y = torch.empty(100, 100, dtype=torch.double) y.copy_(x) self.assertEqual(y[:, 0], range(100)) self.assertEqual(y[:, 40], range(4000, 4100)) # Validates regression reported in https://github.com/pytorch/pytorch/issues/45269 x = torch.arange(100 * 100).reshape(100, 100).to(dtype=torch.cfloat).t() y = torch.empty(100, 100, dtype=torch.cfloat) y.copy_(x) self.assertEqual(y[:, 0], range(100)) self.assertEqual(y[:, 40], range(4000, 4100)) def test_device(self): cpu = torch.device('cpu') self.assertEqual('cpu', str(cpu)) self.assertEqual('cpu', cpu.type) self.assertEqual(None, cpu.index) cpu0 = torch.device('cpu:0') self.assertEqual('cpu:0', str(cpu0)) self.assertEqual('cpu', cpu0.type) self.assertEqual(0, cpu0.index) cpu0 = torch.device('cpu', 0) self.assertEqual('cpu:0', str(cpu0)) self.assertEqual('cpu', cpu0.type) self.assertEqual(0, cpu0.index) cuda = torch.device('cuda') self.assertEqual('cuda', str(cuda)) self.assertEqual('cuda', cuda.type) self.assertEqual(None, cuda.index) cuda1 = torch.device('cuda:1') self.assertEqual('cuda:1', str(cuda1)) self.assertEqual('cuda', cuda1.type) self.assertEqual(1, cuda1.index) cuda1 = torch.device('cuda', 1) self.assertEqual('cuda:1', str(cuda1)) self.assertEqual('cuda', cuda1.type) self.assertEqual(1, cuda1.index) cuda90 = torch.device('cuda', 90) self.assertEqual('cuda:90', str(cuda90)) self.assertEqual('cuda', cuda90.type) self.assertEqual(90, cuda90.index) self.assertRaises(RuntimeError, lambda: torch.device('cpu:-1')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:-1')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 ')) self.assertRaises(RuntimeError, lambda: torch.device('cuda: 2')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 2')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2.')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2?')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:?2')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2.232')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 cuda:3')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2+cuda:3')) self.assertRaises(RuntimeError, lambda: 
torch.device('cuda:2cuda:3')) self.assertRaises(RuntimeError, lambda: torch.device(-1)) self.assertRaises(RuntimeError, lambda: torch.device('other')) self.assertRaises(RuntimeError, lambda: torch.device('other:0')) device_set = {'cpu', 'cpu:0', 'cuda', 'cuda:0', 'cuda:1', 'cuda:10', 'cuda:100'} device_hash_set = set() for device in list(device_set): device_hash_set.add(hash(torch.device(device))) self.assertEqual(len(device_set), len(device_hash_set)) def get_expected_device_repr(device): if device.index is not None: return "device(type='{type}', index={index})".format( type=device.type, index=device.index) return "device(type='{type}')".format(type=device.type) for device in device_set: dev = torch.device(device) self.assertEqual(repr(dev), get_expected_device_repr(dev)) def test_to(self): def test_copy_behavior(t, non_blocking=False): self.assertIs(t, t.to(t, non_blocking=non_blocking)) self.assertIs(t, t.to(t.dtype, non_blocking=non_blocking)) self.assertIs(t, t.to(torch.empty_like(t), non_blocking=non_blocking)) self.assertIsNot(t, t.to(t, non_blocking=non_blocking, copy=True)) self.assertIsNot(t, t.to(t.dtype, non_blocking=non_blocking, copy=True)) self.assertIsNot(t, t.to(torch.empty_like(t), non_blocking=non_blocking, copy=True)) devices = [t.device] if t.device.type == 'cuda': if t.device.index == -1: devices.append('cuda:{}'.format(torch.cuda.current_device())) elif t.device.index == torch.cuda.current_device(): devices.append('cuda') for device in devices: self.assertIs(t, t.to(device, non_blocking=non_blocking)) self.assertIs(t, t.to(device, t.dtype, non_blocking=non_blocking)) self.assertIsNot(t, t.to(device, non_blocking=non_blocking, copy=True)) self.assertIsNot(t, t.to(device, t.dtype, non_blocking=non_blocking, copy=True)) a = torch.tensor(5) test_copy_behavior(a) self.assertEqual(a.device, a.to('cpu').device) self.assertEqual(a.device, a.to('cpu', dtype=torch.float32).device) self.assertIs(torch.float32, a.to('cpu', dtype=torch.float32).dtype) self.assertEqual(a.device, a.to(torch.float32).device) self.assertIs(torch.float32, a.to(dtype=torch.float32).dtype) self.assertEqual(a.data_ptr(), a.to('cpu').data_ptr()) self.assertEqual(a.data_ptr(), a.to(dtype=a.dtype, device=a.device, copy=False).data_ptr()) self.assertEqual(a.data_ptr(), a.to('cpu', copy=False).data_ptr()) self.assertNotEqual(a.data_ptr(), a.to('cpu', copy=True).data_ptr()) if torch.cuda.is_available(): for non_blocking in [True, False]: for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']: b = torch.tensor(5., device=cuda) test_copy_behavior(b, non_blocking) self.assertEqual(b.device, b.to(cuda, non_blocking=non_blocking).device) self.assertEqual(a.device, b.to('cpu', non_blocking=non_blocking).device) self.assertEqual(b.device, a.to(cuda, non_blocking=non_blocking).device) self.assertIs(torch.int32, b.to('cpu', dtype=torch.int32, non_blocking=non_blocking).dtype) self.assertEqual(a.device, b.to('cpu', dtype=torch.int32, non_blocking=non_blocking).device) self.assertIs(torch.int32, b.to(dtype=torch.int32).dtype) self.assertEqual(b.device, b.to(dtype=torch.int32).device) def test_to_with_tensor(self): a = torch.tensor(5) self.assertEqual(a.device, a.to(a).device) if torch.cuda.is_available(): for non_blocking in [True, False]: for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']: b = torch.tensor(5., device=cuda) self.assertEqual(b.device, b.to(b, non_blocking=non_blocking).device) self.assertEqual(a.device, b.to(a, non_blocking=non_blocking).device) 
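                        # Note: Tensor.to(other) adopts `other`'s dtype and device, so
                        # the result lands wherever the argument tensor lives.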
self.assertEqual(b.device, a.to(b, non_blocking=non_blocking).device) def test_as_subclass(self): class SubTensor(torch.Tensor): member_var = object() t0 = torch.tensor(0) t1 = torch.tensor([1, 2]) t2 = torch.tensor([[3, 4], [5, 6]]) s0 = t0.as_subclass(SubTensor) s1 = t1.as_subclass(SubTensor) s2 = t2.as_subclass(SubTensor) # Check that the correct type is returned. self.assertTrue(type(s0) is SubTensor) self.assertTrue(type(s1) is SubTensor) self.assertTrue(type(s2) is SubTensor) # Check that the data is equal. self.assertEqual(t0, s0) self.assertEqual(t1, s1) self.assertEqual(t2, s2) t0[()] = 1 t1[1] = 3 t2[1, 1] = 7 # Check that the data is equal even after modification. self.assertEqual(t0, s0) self.assertEqual(t1, s1) self.assertEqual(t2, s2) # Check that member variables are passed through. self.assertTrue(s0.member_var is SubTensor.member_var) self.assertTrue(s1.member_var is SubTensor.member_var) self.assertTrue(s2.member_var is SubTensor.member_var) # Test that autograd is propagated. t = torch.tensor(5, dtype=torch.float32, requires_grad=True) # Run a calculation on the tensor. exp_t = torch.exp(t) # Cast exp_t to a subclass. exp_s = exp_t.as_subclass(SubTensor) # Make sure that t.grad was initially None self.assertTrue(t.grad is None) # Run the autograd calculation. exp_s.backward() # Make sure autograd was propagated to the original tensor # declared with requires_grad. self.assertTrue(t.grad is not None) def test_type(self): x = torch.randn(3, 3).double() self.assertEqual(x.type('torch.FloatTensor').dtype, torch.float32) self.assertEqual(x.type(torch.FloatTensor).dtype, torch.float32) self.assertEqual(x.int().type(torch.Tensor).dtype, torch.get_default_dtype()) self.assertEqual(x.type(torch.int32).dtype, torch.int32) def test_qengine(self): qengines = torch.backends.quantized.supported_engines original_qe = torch.backends.quantized.engine for qe in qengines: torch.backends.quantized.engine = qe assert torch.backends.quantized.engine == qe, 'qengine not set successfully' torch.backends.quantized.engine = original_qe def _spawn_method(self, method, arg): try: mp.set_start_method('spawn') except RuntimeError: pass with mp.Pool(1) as pool: out: list = pool.map(method, [arg]) self.assertTrue(out[0]) @staticmethod def _test_multinomial_invalid_probs(probs): try: # n_sample = 1 is a special case, test n_sample=2 which is more general torch.multinomial(probs.to('cpu'), 2) return False # Should not be reached except RuntimeError as e: return 'probability tensor contains either `inf`, `nan` or element < 0' in str(e) @slowTest @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \ don't support multiprocessing with spawn start method") @unittest.skipIf(IS_WINDOWS, 'FIXME: CUDA OOM error on Windows') def test_multinomial_invalid_probs(self): test_method = AbstractTestCases._TestTorchMixin._test_multinomial_invalid_probs self._spawn_method(test_method, torch.tensor([1., -1., 1.])) self._spawn_method(test_method, torch.tensor([1., inf, 1.])) self._spawn_method(test_method, torch.tensor([1., -inf, 1.])) self._spawn_method(test_method, torch.tensor([1., 1., nan])) def test_copy_broadcast(self): torch.zeros(5, 6).copy_(torch.zeros(6)) self.assertRaises(RuntimeError, lambda: torch.zeros(5, 6).copy_(torch.zeros(30))) def test_copy_many_to_one(self): # Testing in-place copy where it attempt to write from many memory # storage to a single storage would cause RuntimeError to be thrown self.assertRaises(RuntimeError, lambda: torch.zeros(1, 6).expand(5, 6).copy_(torch.zeros(5, 
6))) def test_slice(self): empty = torch.empty(0, 4) x = torch.arange(0., 16).view(4, 4) self.assertEqual(x[:], x) self.assertEqual(x[:4], x) # start and stop are clamped to the size of dim self.assertEqual(x[:5], x) # if start >= stop then the result is empty self.assertEqual(x[2:1], empty) self.assertEqual(x[2:2], empty) # out of bounds is also empty self.assertEqual(x[10:12], empty) # additional correctness checks self.assertEqual(x[:1].tolist(), [[0, 1, 2, 3]]) self.assertEqual(x[:-3].tolist(), [[0, 1, 2, 3]]) self.assertEqual(x[:, -2:3].tolist(), [[2], [6], [10], [14]]) self.assertEqual(x[0:-1:2].tolist(), [[0, 1, 2, 3], [8, 9, 10, 11]]) @unittest.skip("Not implemented yet") def test_conv2(self): x = torch.rand(math.floor(torch.uniform(50, 100)), math.floor(torch.uniform(50, 100))) k = torch.rand(math.floor(torch.uniform(10, 20)), math.floor(torch.uniform(10, 20))) imvc = torch.conv2(x, k) imvc2 = torch.conv2(x, k, 'V') imfc = torch.conv2(x, k, 'F') ki = k.clone() ks = k.storage() kis = ki.storage() for i in range(ks.size() - 1, 0, -1): kis[ks.size() - i + 1] = ks[i] # for i=ks.size(), 1, -1 do kis[ks.size()-i+1]=ks[i] end imvx = torch.xcorr2(x, ki) imvx2 = torch.xcorr2(x, ki, 'V') imfx = torch.xcorr2(x, ki, 'F') self.assertEqual(imvc, imvc2, atol=0, rtol=0, msg='torch.conv2') self.assertEqual(imvc, imvx, atol=0, rtol=0, msg='torch.conv2') self.assertEqual(imvc, imvx2, atol=0, rtol=0, msg='torch.conv2') self.assertEqual(imfc, imfx, atol=0, rtol=0, msg='torch.conv2') self.assertLessEqual(math.abs(x.dot(x) - torch.xcorr2(x, x)[0][0]), 1e-10, 'torch.conv2') xx = torch.empty(2, x.size(1), x.size(2)) xx[1].copy_(x) xx[2].copy_(x) kk = torch.empty(2, k.size(1), k.size(2)) kk[1].copy_(k) kk[2].copy_(k) immvc = torch.conv2(xx, kk) immvc2 = torch.conv2(xx, kk, 'V') immfc = torch.conv2(xx, kk, 'F') self.assertEqual(immvc[0], immvc[1], atol=0, rtol=0, msg='torch.conv2') self.assertEqual(immvc[0], imvc, atol=0, rtol=0, msg='torch.conv2') self.assertEqual(immvc2[0], imvc2, atol=0, rtol=0, msg='torch.conv2') self.assertEqual(immfc[0], immfc[1], atol=0, rtol=0, msg='torch.conv2') self.assertEqual(immfc[0], imfc, atol=0, rtol=0, msg='torch.conv2') @unittest.skip("Not implemented yet") def test_conv3(self): x = torch.rand(math.floor(torch.uniform(20, 40)), math.floor(torch.uniform(20, 40)), math.floor(torch.uniform(20, 40))) k = torch.rand(math.floor(torch.uniform(5, 10)), math.floor(torch.uniform(5, 10)), math.floor(torch.uniform(5, 10))) imvc = torch.conv3(x, k) imvc2 = torch.conv3(x, k, 'V') imfc = torch.conv3(x, k, 'F') ki = k.clone() ks = k.storage() kis = ki.storage() for i in range(ks.size() - 1, 0, -1): kis[ks.size() - i + 1] = ks[i] imvx = torch.xcorr3(x, ki) imvx2 = torch.xcorr3(x, ki, 'V') imfx = torch.xcorr3(x, ki, 'F') self.assertEqual(imvc, imvc2, atol=0, rtol=0, msg='torch.conv3') self.assertEqual(imvc, imvx, atol=0, rtol=0, msg='torch.conv3') self.assertEqual(imvc, imvx2, atol=0, rtol=0, msg='torch.conv3') self.assertEqual(imfc, imfx, atol=0, rtol=0, msg='torch.conv3') self.assertLessEqual(math.abs(x.dot(x) - torch.xcorr3(x, x)[0][0][0]), 4e-10, 'torch.conv3') xx = torch.empty(2, x.size(1), x.size(2), x.size(3)) xx[1].copy_(x) xx[2].copy_(x) kk = torch.empty(2, k.size(1), k.size(2), k.size(3)) kk[1].copy_(k) kk[2].copy_(k) immvc = torch.conv3(xx, kk) immvc2 = torch.conv3(xx, kk, 'V') immfc = torch.conv3(xx, kk, 'F') self.assertEqual(immvc[0], immvc[1], atol=0, rtol=0, msg='torch.conv3') self.assertEqual(immvc[0], imvc, atol=0, rtol=0, msg='torch.conv3') self.assertEqual(immvc2[0], 
imvc2, atol=0, rtol=0, msg='torch.conv3') self.assertEqual(immfc[0], immfc[1], atol=0, rtol=0, msg='torch.conv3') self.assertEqual(immfc[0], imfc, atol=0, rtol=0, msg='torch.conv3') @unittest.skip("Not implemented yet") def _test_conv_corr_eq(self, fn, fn_2_to_3): ix = math.floor(random.randint(20, 40)) iy = math.floor(random.randint(20, 40)) iz = math.floor(random.randint(20, 40)) kx = math.floor(random.randint(5, 10)) ky = math.floor(random.randint(5, 10)) kz = math.floor(random.randint(5, 10)) x = torch.rand(ix, iy, iz) k = torch.rand(kx, ky, kz) o3 = fn(x, k) o32 = torch.zeros(o3.size()) fn_2_to_3(x, k, o3, o32) self.assertEqual(o3, o32) @unittest.skip("Not implemented yet") def test_xcorr3_xcorr2_eq(self): def reference(x, k, o3, o32): for i in range(o3.size(1)): for j in range(k.size(1)): o32[i].add(torch.xcorr2(x[i + j - 1], k[j])) self._test_conv_corr_eq(torch.xcorr3, reference) @unittest.skip("Not implemented yet") def test_xcorr3_xcorr2_eq_full(self): def reference(x, k, o3, o32): for i in range(x.size(1)): for j in range(k.size(1)): o32[i].add(torch.xcorr2(x[i], k[k.size(1) - j + 1], 'F')) self._test_conv_corr_eq(lambda x, k: torch.xcorr3(x, k, 'F'), reference) @unittest.skip("Not implemented yet") def test_conv3_conv2_eq_valid(self): def reference(x, k, o3, o32): for i in range(o3.size(1)): for j in range(k.size(1)): o32[i].add(torch.conv2(x[i + j - 1], k[k.size(1) - j + 1])) self._test_conv_corr_eq(torch.conv3, reference) @unittest.skip("Not implemented yet") def test_fconv3_fconv2_eq(self): def reference(x, k, o3, o32): for i in range(o3.size(1)): for j in range(k.size(1)): o32[i + j - 1].add(torch.conv2(x[i], k[j], 'F')) self._test_conv_corr_eq(lambda x, k: torch.conv3(x, k, 'F'), reference) def test_dtype_is_signed(self): for dtype in torch.testing.get_all_dtypes(): self.assertEqual(dtype.is_signed, torch.is_signed(torch.tensor(0, dtype=dtype))) self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.quint8.is_signed) self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.qint8.is_signed) self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.qint32.is_signed) def test_RNGState(self): state = torch.get_rng_state() stateCloned = state.clone() before = torch.rand(1000) self.assertEqual(state.ne(stateCloned).long().sum(), 0, atol=0, rtol=0) torch.set_rng_state(state) after = torch.rand(1000) self.assertEqual(before, after, atol=0, rtol=0) def test_RNGStateAliasing(self): # Fork the random number stream at this point gen = torch.Generator() gen.set_state(torch.get_rng_state()) self.assertEqual(gen.get_state(), torch.get_rng_state()) target_value = torch.rand(1000) # Dramatically alter the internal state of the main generator _ = torch.rand(100000) forked_value = torch.rand(1000, generator=gen) self.assertEqual(target_value, forked_value, atol=0, rtol=0, msg="RNG has not forked correctly.") def test_RNG_after_pickle(self): torch.random.manual_seed(100) before = torch.rand(10) torch.random.manual_seed(100) buf = io.BytesIO() tensor = torch.tensor([1, 2, 3]) ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(tensor) after = torch.rand(10) self.assertEqual(before, after, atol=0, rtol=0) def test_boxMullerState(self): torch.manual_seed(123) odd_number = 101 seeded = torch.randn(odd_number) state = torch.get_rng_state() midstream = torch.randn(odd_number) torch.set_rng_state(state) repeat_midstream = torch.randn(odd_number) torch.manual_seed(123) reseeded = torch.randn(odd_number) 
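            # (The odd draw count appears deliberate: normal sampling produces values
            # in pairs, so an odd count leaves a cached second value inside the
            # generator; get/set_rng_state must round-trip that cached value too.)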
self.assertEqual(midstream, repeat_midstream, atol=0, rtol=0, msg='get_rng_state/set_rng_state not generating same sequence of normally distributed numbers') self.assertEqual(seeded, reseeded, atol=0, rtol=0, msg='repeated calls to manual_seed not generating same sequence of normally distributed numbers') def test_manual_seed(self): rng_state = torch.get_rng_state() torch.manual_seed(2) x = torch.randn(100) self.assertEqual(torch.initial_seed(), 2) torch.manual_seed(2) y = torch.randn(100) self.assertEqual(x, y) max_int64 = 0x7fff_ffff_ffff_ffff min_int64 = -max_int64 - 1 max_uint64 = 0xffff_ffff_ffff_ffff # Check all boundary cases of valid seed value inputs test_cases = [ # (seed, expected_initial_seed) # Positive seeds should be unchanged (max_int64, max_int64), (max_int64 + 1, max_int64 + 1), (max_uint64, max_uint64), (0, 0), # Negative seeds wrap around starting from the largest seed value (-1, max_uint64), (min_int64, max_int64 + 1) ] for seed, expected_initial_seed in test_cases: torch.manual_seed(seed) actual_initial_seed = torch.initial_seed() msg = "expected initial_seed() = %x after calling manual_seed(%x), but got %x instead" % ( expected_initial_seed, seed, actual_initial_seed) self.assertEqual(expected_initial_seed, actual_initial_seed, msg=msg) for invalid_seed in [min_int64 - 1, max_uint64 + 1]: with self.assertRaisesRegex(RuntimeError, r'Overflow when unpacking long'): torch.manual_seed(invalid_seed) torch.set_rng_state(rng_state) def test_numel(self): b = torch.ByteTensor(3, 100, 100) self.assertEqual(b.nelement(), 3 * 100 * 100) self.assertEqual(b.numel(), 3 * 100 * 100) def test_empty_storage_view(self): # we should be able to "modify" slices of a 0-element # array without an error being raised due to # trying to resize its storage t = torch.from_numpy(np.empty((0, 4))) t[:, 1::2] *= 1 def test_newaxis_numpy_comparison(self): def run_test(tensor, *idx): npt = tensor.numpy() self.assertEqual(tensor[idx], npt[idx]) # 1D Tensor Tests x = torch.arange(0, 10) cases = [ [None], [None, None], [Ellipsis, None], [None, Ellipsis], [2, None], [None, 2], [Ellipsis, None, 2], [Ellipsis, 2, None], [2, Ellipsis, None], [2, None, Ellipsis], [None, 2, Ellipsis], [None, Ellipsis, 2], ] for case in cases: run_test(x, *case) # 2D Tensor Tests x = torch.arange(0, 12).view(3, 4) cases = [ [None], [None, None], [None, None, None], [Ellipsis, None], [Ellipsis, None, None], [None, Ellipsis], [None, Ellipsis, None], [None, None, Ellipsis], [2, None], [2, None, Ellipsis], [2, Ellipsis, None], [None, 2, Ellipsis], [Ellipsis, 2, None], [Ellipsis, None, 2], [None, Ellipsis, 2], [1, 2, None], [1, 2, Ellipsis, None], [1, Ellipsis, 2, None], [Ellipsis, 1, None, 2], [Ellipsis, 1, 2, None], [1, None, 2, Ellipsis], [None, 1, Ellipsis, 2], [None, 1, 2, Ellipsis], ] for case in cases: run_test(x, *case) def _consecutive(self, size, start=1): sequence = torch.ones(torch.tensor(size).prod(0)).cumsum(0) sequence.add_(start - 1) return sequence.resize_(*size) def test_newindex(self): reference = self._consecutive((3, 3, 3)) # This relies on __index__() being correct - but we have separate tests for that def checkPartialAssign(index): reference = torch.zeros(3, 3, 3) reference[index] = self._consecutive((3, 3, 3))[index] self.assertEqual(reference[index], self._consecutive((3, 3, 3))[index], atol=0, rtol=0) reference[index] = 0 self.assertEqual(reference, torch.zeros(3, 3, 3), atol=0, rtol=0) checkPartialAssign(0) checkPartialAssign(1) checkPartialAssign(2) checkPartialAssign((0, 1)) checkPartialAssign((1, 2)) 
checkPartialAssign((0, 2)) checkPartialAssign(torch.LongTensor((0, 2))) with self.assertRaises(IndexError): reference[1, 1, 1, 1] = 1 with self.assertRaises(IndexError): reference[1, 1, 1, (1, 1)] = 1 with self.assertRaises(IndexError): reference[3, 3, 3, 3, 3, 3, 3, 3] = 1 with self.assertRaises(IndexError): reference[0.0] = 1 with self.assertRaises(TypeError): reference[0.0:2.0] = 1 with self.assertRaises(IndexError): reference[0.0, 0.0:2.0] = 1 with self.assertRaises(IndexError): reference[0.0, :, 0.0:2.0] = 1 with self.assertRaises(IndexError): reference[0.0, ..., 0.0:2.0] = 1 with self.assertRaises(IndexError): reference[0.0, :, 0.0] = 1 def test_index_add(self): for device in torch.testing.get_all_device_types(): for dest_contig, src_contig, index_contig in product([True, False], repeat=3): for other_sizes in ((), (4, 5)): for dtype in [torch.int, torch.long]: num_copy, num_dest = 3, 3 dest = torch.randn(num_dest, *other_sizes, device=device) if not dest_contig: dest = torch.testing.make_non_contiguous(dest) src = torch.randn(num_copy, *other_sizes, device=device) if not src_contig: src = torch.testing.make_non_contiguous(src) idx = torch.randperm(num_dest, dtype=dtype, device=device).narrow(0, 0, num_copy) if not index_contig: idx = torch.testing.make_non_contiguous(idx) # index_add_ without alpha argument dest2 = dest.clone() dest.index_add_(0, idx, src) for i in range(idx.size(0)): dest2[idx[i]] += src[i] self.assertEqual(dest, dest2) # index_add_ with alpha argument dest2 = dest.clone() dest.index_add_(0, idx, src, alpha=2) for i in range(idx.size(0)): dest2[idx[i]] += src[i] * 2 self.assertEqual(dest, dest2) # add coverage for issue with atomic add that appeared only for # specific dtypes on cuda: # https://github.com/pytorch/pytorch/issues/29153 def test_index_add_all_dtypes(self): for device in torch.testing.get_all_device_types(): for dtype in torch.testing.get_all_math_dtypes(device): for idx_dtype in [torch.int, torch.long]: size = [5, 5] if dtype.is_floating_point or dtype.is_complex: tensor = torch.rand(size, dtype=dtype, device=device) elif dtype.is_signed: tensor = torch.randint(-5, 15, size, dtype=dtype, device=device) else: tensor = torch.randint(0, 10, size, dtype=dtype, device=device) # index_add calls atomicAdd on cuda. zeros = torch.zeros(size, dtype=dtype, device=device) added = zeros.index_add(0, torch.arange(0, size[0], dtype=idx_dtype, device=device), tensor) self.assertEqual(added, tensor) added = zeros.index_add(0, torch.arange(0, size[0], dtype=idx_dtype, device=device), tensor, alpha=-1) self.assertEqual(added, -tensor) # Fill idx with valid indices. 
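    # (Sketch of the invariant _fill_indices establishes: along every slice of `idx`
    #  in dimension `dim`, indices are drawn from randperm(dim_size), so the
    #  gather/scatter tests below always index in bounds, with no duplicates per row.)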
@staticmethod def _fill_indices(self, idx, dim, dim_size, elems_per_row, m, n, o): for i in range(1 if dim == 0 else m): for j in range(1 if dim == 1 else n): for k in range(1 if dim == 2 else o): ii = [i, j, k] ii[dim] = slice(0, idx.size(dim) + 1) idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row] def test_unflatten(self): # test args: tensor, int, sizes self.assertEqual(torch.tensor([]).unflatten(0, (0, 1)), torch.empty(0, 1)) self.assertEqual(torch.tensor([1]).unflatten(0, (1, 1)), torch.tensor([[1]])) self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, (2, 2)), torch.tensor([[1, 2], [3, 4]])) self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, [2, 2]), torch.tensor([[1, 2], [3, 4]])) self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, torch.Size([2, 2])), torch.tensor([[1, 2], [3, 4]])) self.assertEqual(torch.ones(2, 10).unflatten(1, (5, 2)), torch.ones(2, 5, 2)) self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, (-1, 2)), torch.tensor([[1, 2], [3, 4]])) self.assertEqual(torch.ones(2, 10).unflatten(1, (5, -1)), torch.ones(2, 5, 2)) self.assertEqual(torch.ones(2, 10).unflatten(1, (-1,)), torch.ones(2, 10)) self.assertEqual(torch.ones(2, 3 * 4 * 5 * 6).unflatten(1, (3, 4, -1, 6)), torch.ones(2, 3, 4, 5, 6)) self.assertEqual(torch.ones(2, 0, 2).unflatten(1, (3, -1, 4, 5)), torch.ones(2, 3, 0, 4, 5, 2)) # test invalid args: tensor, str, sizes with self.assertRaisesRegex(TypeError, r"received an invalid combination of arguments"): torch.tensor([1]).unflatten('A', (1, 1)) # test invalid args: tensor, str, namedshape with self.assertRaisesRegex(RuntimeError, r"Name 'A' not found in Tensor\[None\]."): torch.ones(4).unflatten('A', (('A', 2), ('B', 2))) # test other invalid arguments with self.assertRaisesRegex(RuntimeError, r"sizes must be non-empty"): torch.tensor([1]).unflatten(0, []) with self.assertRaisesRegex(RuntimeError, r"Provided sizes \[2, 2\] don't multiply up to the size of dim 0 \(1\)"): torch.tensor([1]).unflatten(0, [2, 2]) with self.assertRaisesRegex(IndexError, r"dimension specified as 0 but tensor has no dimensions"): torch.tensor(1).unflatten(0, [0]) with self.assertRaisesRegex(RuntimeError, r"only one dimension can be inferred"): torch.randn(5, 10).unflatten(1, (-1, -1)) with self.assertRaisesRegex(RuntimeError, r"Provided sizes \[-1, 4\] don't multiply up to the size of dim 1 \(10\)"): torch.randn(5, 10).unflatten(1, (-1, 4)) with self.assertRaisesRegex(RuntimeError, r"the unspecified dimension size -1 can be any value and is ambiguous"): torch.randn(2, 0).unflatten(1, (2, -1, 0)) @staticmethod def _test_gather(self, cast, test_bounds=True): m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20) elems_per_row = random.randint(1, 10) dim = random.randrange(3) for dtype in {torch.float32, torch.complex64, torch.complex128}: src = torch.randn(m, n, o, dtype=dtype) idx_size = [m, n, o] idx_size[dim] = elems_per_row idx = torch.LongTensor().resize_(*idx_size) AbstractTestCases._TestTorchMixin._fill_indices(self, idx, dim, src.size(dim), elems_per_row, m, n, o) src = cast(src) idx = cast(idx) actual = torch.gather(src, dim, idx) expected = cast(torch.zeros(idx_size, dtype=dtype)) for i in range(idx_size[0]): for j in range(idx_size[1]): for k in range(idx_size[2]): ii = [i, j, k] ii[dim] = idx[i, j, k] expected[i, j, k] = src[tuple(ii)] self.assertEqual(actual, expected, atol=0, rtol=0) bad_src = torch.randn(*[i - 1 for i in idx_size]) self.assertRaises(RuntimeError, lambda: torch.gather(bad_src, dim, idx)) # should throw an 
    @staticmethod
    def _test_gather(self, cast, test_bounds=True):
        m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
        elems_per_row = random.randint(1, 10)
        dim = random.randrange(3)

        for dtype in {torch.float32, torch.complex64, torch.complex128}:
            src = torch.randn(m, n, o, dtype=dtype)
            idx_size = [m, n, o]
            idx_size[dim] = elems_per_row
            idx = torch.LongTensor().resize_(*idx_size)
            AbstractTestCases._TestTorchMixin._fill_indices(self, idx, dim, src.size(dim), elems_per_row, m, n, o)

            src = cast(src)
            idx = cast(idx)

            actual = torch.gather(src, dim, idx)
            expected = cast(torch.zeros(idx_size, dtype=dtype))
            for i in range(idx_size[0]):
                for j in range(idx_size[1]):
                    for k in range(idx_size[2]):
                        ii = [i, j, k]
                        ii[dim] = idx[i, j, k]
                        expected[i, j, k] = src[tuple(ii)]
            self.assertEqual(actual, expected, atol=0, rtol=0)

            bad_src = torch.randn(*[i - 1 for i in idx_size])
            self.assertRaises(RuntimeError, lambda: torch.gather(bad_src, dim, idx))

            # should throw an error when index dtype is not long
            with self.assertRaisesRegex(RuntimeError, 'Expected dtype int64 for index'):
                torch.gather(src, dim, idx.to(torch.int))

            # should throw an error when out.dtype != src.dtype.
            with self.assertRaisesRegex(RuntimeError, 'Expected self.dtype to be equal to src.dtype'):
                torch.gather(src, dim, idx, out=expected.to(torch.int))

            # checks for the same dimensionality
            with self.assertRaisesRegex(RuntimeError, 'Index tensor must have the same number of dimensions as input tensor'):
                torch.gather(src, dim, idx.unsqueeze(-1))
            with self.assertRaisesRegex(RuntimeError, 'Index tensor must have the same number of dimensions as input tensor'):
                torch.gather(src.unsqueeze(-1), dim, idx)

            if test_bounds:
                idx[0][0][0] = 23
                self.assertRaises(RuntimeError, lambda: torch.gather(src, dim, idx))

        src = cast(torch.randn(3, 4, 5))
        expected, idx = src.max(2, True)
        expected = cast(expected)
        idx = cast(idx)
        actual = torch.gather(src, 2, idx)
        self.assertEqual(actual, expected, atol=0, rtol=0)

        # Bool test case
        t = torch.tensor([[False, True], [True, True]])
        self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]])),
                         torch.tensor([[False, False], [True, True]]))

    def test_gather(self):
        self._test_gather(self, lambda t: t)

    @staticmethod
    def _test_scatter_add_mult_index_base(self, cast):
        m, n = 30, 40
        idx = torch.zeros(m, n).long()
        src = torch.ones(m, n)
        res0 = torch.zeros(m, n).scatter_add_(0, idx, src)
        res1 = torch.zeros(m, n).scatter_add_(1, idx, src)

        self.assertEqual(res0[0, :], m * torch.ones(n), atol=0, rtol=0)
        self.assertEqual(res1[:, 0], n * torch.ones(m), atol=0, rtol=0)

    def test_scatter_add_mult_index(self):
        self._test_scatter_add_mult_index_base(self, lambda t: t)
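    # Illustrative sketch (not part of the original suite): scatter_add_ with a
    # zero tensor as the base is one way to build a histogram, i.e. it matches
    # torch.bincount for non-negative integer inputs. Assumes only torch.
    def test_scatter_add_histogram_example(self):
        labels = torch.tensor([0, 2, 2, 1, 0, 2])
        hist = torch.zeros(3).scatter_add_(0, labels, torch.ones_like(labels, dtype=torch.float))
        self.assertEqual(hist, torch.tensor([2., 1., 3.]))
        self.assertEqual(hist.long(), torch.bincount(labels, minlength=3))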
    @staticmethod
    def _test_scatter_base(self, cast, method, is_scalar=False, test_bounds=True, reduction=None, *, test_complex=False):
        if test_complex:
            dtypes = [torch.complex64, torch.complex128]
        else:
            dtypes = [torch.float16, torch.float32, torch.float64]

        for dtype in dtypes:
            m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
            elems_per_row = random.randint(1, 10)
            dim = random.randrange(3)

            idx_size = [m, n, o]
            idx_size[dim] = elems_per_row
            idx = cast(torch.LongTensor().resize_(*idx_size))
            AbstractTestCases._TestTorchMixin._fill_indices(self, idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o)

            src_size = [random.randint(1, 5) + s for s in idx_size]
            if is_scalar:
                src = random.random()
            else:
                src = cast(torch.randn(src_size, dtype=dtype))

            base = cast(torch.randn(m, n, o, dtype=dtype))
            if reduction:
                actual = getattr(base.clone(), method)(dim, idx, src, reduce=reduction)
            else:
                actual = getattr(base.clone(), method)(dim, idx, src)
            expected = base.clone()
            for i in range(idx_size[0]):
                for j in range(idx_size[1]):
                    for k in range(idx_size[2]):
                        ii = [i, j, k]
                        ii[dim] = idx[i, j, k]
                        if method == 'scatter_add_':
                            expected[tuple(ii)] += src[i, j, k]
                        else:
                            # method may be 'scatter_' or 'scatter'
                            # both might have a reduction argument
                            value = src if is_scalar else src[i, j, k]
                            if reduction == "add":
                                expected[tuple(ii)] += value
                            elif reduction == "multiply":
                                expected[tuple(ii)] *= value
                            else:
                                expected[tuple(ii)] = value
            self.assertEqual(actual, expected, atol=0, rtol=0)

            # should throw an error when self.dtype != src.dtype.
            # we ignore the case when src is Scalar, as it gets
            # cast via src.to<scalar_t>.
            if not is_scalar:
                with self.assertRaisesRegex(RuntimeError, 'Expected self.dtype to be equal to src.dtype'):
                    getattr(base.clone().type(torch.int), method)(dim, idx, src)
                with self.assertRaisesRegex(RuntimeError, 'Expected self.dtype to be equal to src.dtype'):
                    getattr(base.clone(), method)(dim, idx, src.type(torch.int))

            # should throw an error when index dtype is not long
            with self.assertRaisesRegex(RuntimeError, 'Expected dtype int64 for index'):
                getattr(base.clone(), method)(dim, idx.type(torch.int), src)

            # check for the same dimensionality
            with self.assertRaisesRegex(RuntimeError, 'Index tensor must have the same number of dimensions as self tensor'):
                getattr(base.clone().unsqueeze(-1), method)(dim, idx, src)
            with self.assertRaisesRegex(RuntimeError, 'Index tensor must have the same number of dimensions as self tensor'):
                getattr(base.clone(), method)(dim, idx.unsqueeze(-1), src)
            if not is_scalar:
                with self.assertRaisesRegex(RuntimeError, 'Index tensor must have the same number of dimensions as src tensor'):
                    getattr(base.clone(), method)(dim, idx, src.unsqueeze(-1))

            if test_bounds:
                idx[0][0][0] = 34
                with self.assertRaises(RuntimeError):
                    if reduction:
                        getattr(base.clone(), method)(dim, idx, src, reduce=reduction)
                    else:
                        getattr(base.clone(), method)(dim, idx, src)

            # test for empty index, should be a no-op
            idx = cast(torch.LongTensor())
            if reduction:
                actual = getattr(base.clone(), method)(dim, idx, src, reduce=reduction)
            else:
                actual = getattr(base.clone(), method)(dim, idx, src)
            self.assertEqual(actual, base, atol=0, rtol=0)

    def test_scatter(self):
        self._test_scatter_base(self, lambda t: t, 'scatter_')

    def test_scatterAdd(self):
        self._test_scatter_base(self, lambda t: t, 'scatter_add_')

    def test_scatterFill(self):
        self._test_scatter_base(self, lambda t: t, 'scatter_', True)

    def test_scatterReduce(self):
        for method in ["add", "multiply"]:
            self._test_scatter_base(self, lambda t: t, 'scatter_', reduction=method)
            self._test_scatter_base(self, lambda t: t, 'scatter_', True, reduction=method)

    def test_structseq_repr(self):
        a = torch.arange(250).reshape(5, 5, 10)
        expected = """
        torch.return_types.max(
        values=tensor([[ 40,  41,  42,  43,  44,  45,  46,  47,  48,  49],
                [ 90,  91,  92,  93,  94,  95,  96,  97,  98,  99],
                [140, 141, 142, 143, 144, 145, 146, 147, 148, 149],
                [190, 191, 192, 193, 194, 195, 196, 197, 198, 199],
                [240, 241, 242, 243, 244, 245, 246, 247, 248, 249]]),
        indices=tensor([[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
                [4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
                [4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
                [4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
                [4, 4, 4, 4, 4, 4, 4, 4, 4, 4]]))"""
        self.assertEqual(repr(a.max(1)), textwrap.dedent(expected).strip())
    def test_is_same_size(self):
        t1 = torch.empty(3, 4, 9, 10)
        t2 = torch.empty(3, 4)
        t3 = torch.empty(1, 9, 3, 3)
        t4 = torch.empty(3, 4, 9, 10)

        self.assertFalse(t1.is_same_size(t2))
        self.assertFalse(t1.is_same_size(t3))
        self.assertTrue(t1.is_same_size(t4))

    def test_tensor_set(self):
        t1 = torch.tensor([])
        t2 = torch.empty(3, 4, 9, 10).uniform_()
        t1.set_(t2)
        self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
        size = torch.Size([9, 3, 4, 10])
        t1.set_(t2.storage(), 0, size)
        self.assertEqual(t1.size(), size)
        t1.set_(t2.storage(), 0, tuple(size))
        self.assertEqual(t1.size(), size)
        self.assertEqual(t1.stride(), (120, 40, 10, 1))
        stride = (10, 360, 90, 1)
        t1.set_(t2.storage(), 0, size, stride)
        self.assertEqual(t1.stride(), stride)
        t1.set_(t2.storage(), 0, size=size, stride=stride)
        self.assertEqual(t1.size(), size)
        self.assertEqual(t1.stride(), stride)

        # test argument names
        t1 = torch.tensor([])
        # 1. case when source is tensor
        t1.set_(source=t2)
        self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
        # 2. case when source is storage
        t1.set_(source=t2.storage())
        self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
        # 3. case when source is storage, and other args also specified
        t1.set_(source=t2.storage(), storage_offset=0, size=size, stride=stride)
        self.assertEqual(t1.size(), size)
        self.assertEqual(t1.stride(), stride)

        t1 = torch.tensor([True, True], dtype=torch.bool)
        t2 = torch.tensor([False, False], dtype=torch.bool)
        t1.set_(t2)
        self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)

    def test_tensor_set_errors(self):
        f_cpu = torch.randn((2, 3), dtype=torch.float32)
        d_cpu = torch.randn((2, 3), dtype=torch.float64)

        # change dtype
        self.assertRaises(RuntimeError, lambda: f_cpu.set_(d_cpu.storage()))
        self.assertRaises(RuntimeError,
                          lambda: f_cpu.set_(d_cpu.storage(), 0, d_cpu.size(), d_cpu.stride()))
        self.assertRaises(RuntimeError, lambda: f_cpu.set_(d_cpu))

        # change device
        if torch.cuda.is_available():
            f_cuda = torch.randn((2, 3), dtype=torch.float32, device='cuda')

            # cpu -> cuda
            self.assertRaises(RuntimeError, lambda: f_cpu.set_(f_cuda.storage()))
            self.assertRaises(RuntimeError,
                              lambda: f_cpu.set_(f_cuda.storage(), 0, f_cuda.size(), f_cuda.stride()))
            self.assertRaises(RuntimeError, lambda: f_cpu.set_(f_cuda))

            # cuda -> cpu
            self.assertRaises(RuntimeError, lambda: f_cuda.set_(f_cpu.storage()))
            self.assertRaises(RuntimeError,
                              lambda: f_cuda.set_(f_cpu.storage(), 0, f_cpu.size(), f_cpu.stride()))
            self.assertRaises(RuntimeError, lambda: f_cuda.set_(f_cpu))

    def test_equal(self):
        # Contiguous, 1D
        t1 = torch.tensor((3., 4., 9., 10.))
        t2 = t1.contiguous()
        t3 = torch.tensor((1., 9., 3., 10.))
        t4 = torch.tensor((3., 4., 9.))
        t5 = torch.tensor([])
        self.assertTrue(t1.equal(t2))
        self.assertFalse(t1.equal(t3))
        self.assertFalse(t1.equal(t4))
        self.assertFalse(t1.equal(t5))
        self.assertTrue(torch.equal(t1, t2))
        self.assertFalse(torch.equal(t1, t3))
        self.assertFalse(torch.equal(t1, t4))
        self.assertFalse(torch.equal(t1, t5))

        # Non contiguous, 2D
        s = torch.tensor(((1, 2, 3, 4), (5, 6, 7, 8)))
        s1 = s[:, 1:3]
        s2 = s1.clone()
        s3 = torch.tensor(((2, 3), (6, 7)))
        s4 = torch.tensor(((0, 0), (0, 0)))
        self.assertFalse(s1.is_contiguous())
        self.assertTrue(s1.equal(s2))
        self.assertTrue(s1.equal(s3))
        self.assertFalse(s1.equal(s4))
        self.assertTrue(torch.equal(s1, s2))
        self.assertTrue(torch.equal(s1, s3))
        self.assertFalse(torch.equal(s1, s4))
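    # Illustrative sketch (not part of the original suite): torch.equal returns
    # a single bool over shape and values, while == compares elementwise and
    # broadcasts. Assumes only torch.
    def test_equal_vs_eq_example(self):
        a = torch.tensor([1, 2, 3])
        b = torch.tensor([1, 0, 3])
        self.assertFalse(torch.equal(a, b))                           # one bool
        self.assertEqual(a == b, torch.tensor([True, False, True]))   # elementwise
        # equal also requires matching shapes; == would broadcast instead
        self.assertFalse(torch.equal(a, torch.tensor([[1, 2, 3]])))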
    def test_element_size(self):
        byte = torch.ByteStorage().element_size()
        char = torch.CharStorage().element_size()
        short = torch.ShortStorage().element_size()
        int = torch.IntStorage().element_size()
        long = torch.LongStorage().element_size()
        float = torch.FloatStorage().element_size()
        double = torch.DoubleStorage().element_size()
        bool = torch.BoolStorage().element_size()
        bfloat16 = torch.BFloat16Storage().element_size()
        complexfloat = torch.ComplexFloatStorage().element_size()
        complexdouble = torch.ComplexDoubleStorage().element_size()

        self.assertEqual(byte, torch.ByteTensor().element_size())
        self.assertEqual(char, torch.CharTensor().element_size())
        self.assertEqual(short, torch.ShortTensor().element_size())
        self.assertEqual(int, torch.IntTensor().element_size())
        self.assertEqual(long, torch.LongTensor().element_size())
        self.assertEqual(float, torch.FloatTensor().element_size())
        self.assertEqual(double, torch.DoubleTensor().element_size())
        self.assertEqual(bool, torch.BoolTensor().element_size())
        self.assertEqual(bfloat16, torch.tensor([], dtype=torch.bfloat16).element_size())
        self.assertEqual(complexfloat, torch.tensor([], dtype=torch.complex64).element_size())
        self.assertEqual(complexdouble, torch.tensor([], dtype=torch.complex128).element_size())

        self.assertGreater(byte, 0)
        self.assertGreater(char, 0)
        self.assertGreater(short, 0)
        self.assertGreater(int, 0)
        self.assertGreater(long, 0)
        self.assertGreater(float, 0)
        self.assertGreater(double, 0)
        self.assertGreater(bool, 0)
        self.assertGreater(bfloat16, 0)
        self.assertGreater(complexfloat, 0)
        self.assertGreater(complexdouble, 0)

        # These tests are portable, not necessarily strict for your system.
        self.assertEqual(byte, 1)
        self.assertEqual(char, 1)
        self.assertEqual(bool, 1)
        self.assertGreaterEqual(short, 2)
        self.assertGreaterEqual(int, 2)
        self.assertGreaterEqual(int, short)
        self.assertGreaterEqual(long, 4)
        self.assertGreaterEqual(long, int)
        self.assertGreaterEqual(double, float)

    def test_permute(self):
        orig = [1, 2, 3, 4, 5, 6, 7]
        perm = torch.randperm(7).tolist()
        x = torch.empty(*orig).fill_(0)
        new = [i - 1 for i in x.permute(*perm).size()]
        self.assertEqual(perm, new)
        self.assertEqual(x.size(), orig)
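    # Illustrative sketch (not part of the original suite): permute returns a
    # view, so no data moves; the strides are permuted along with the sizes and
    # the result typically stops being contiguous. Assumes only torch.
    def test_permute_is_view_example(self):
        x = torch.zeros(2, 3, 4)
        y = x.permute(2, 0, 1)
        self.assertEqual(y.shape, torch.Size([4, 2, 3]))
        self.assertEqual(y.stride(), (1, 12, 4))  # strides of x, reordered
        self.assertFalse(y.is_contiguous())
        y[0, 0, 0] = 1  # writes through to x, since storage is shared
        self.assertEqual(x[0, 0, 0], 1)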
    def test_reversed(self):
        val = torch.arange(0, 10)
        self.assertEqual(reversed(val), torch.arange(9, -1, -1))

        val = torch.arange(1, 10).view(3, 3)
        self.assertEqual(reversed(val), torch.tensor([[7, 8, 9], [4, 5, 6], [1, 2, 3]]))

        val = torch.tensor(42)
        self.assertEqual(reversed(val), torch.tensor(42))

    def test_contains(self):
        x = torch.arange(0, 10)
        self.assertEqual(4 in x, True)
        self.assertEqual(12 in x, False)

        x = torch.arange(1, 10).view(3, 3)
        val = torch.arange(1, 4)
        self.assertEqual(val in x, True)
        val += 10
        self.assertEqual(val in x, False)

        self.assertRaisesRegex(
            RuntimeError,
            "Tensor.__contains__ only supports Tensor or scalar, but you passed in a {}.".format(type("foo")),
            lambda: "foo" in x)
        self.assertRaisesRegex(
            RuntimeError,
            "Tensor.__contains__ only supports Tensor or scalar, but you passed in a {}.".format(type([1, 2])),
            lambda: [1, 2] in x)

    def test_deepcopy_parameter(self):
        from copy import deepcopy
        l = torch.nn.Linear(10, 1)
        s = l.state_dict(keep_vars=True)
        self.assertEqual(torch.nn.Parameter, type(s['weight']))
        self.assertEqual(torch.nn.Parameter, type(s['bias']))

        s2 = deepcopy(s)
        self.assertEqual(torch.nn.Parameter, type(s2['weight']))
        self.assertEqual(torch.nn.Parameter, type(s2['bias']))

    def test_pickle(self):
        import pickle
        a = torch.randn(5, 5)
        serialized = pickle.dumps(a)
        b = pickle.loads(serialized)
        self.assertEqual(a, b)

    def test_pickle_parameter(self):
        import pickle
        a = torch.nn.Parameter(torch.randn(5, 5))
        serialized = pickle.dumps(a)
        b = pickle.loads(serialized)
        self.assertTrue(isinstance(b, torch.nn.Parameter))
        self.assertEqual(a.requires_grad, b.requires_grad)
        self.assertEqual(a, b)

    def test_pickle_parameter_no_requires_grad(self):
        import pickle
        a = torch.nn.Parameter(torch.randn(5, 5), requires_grad=False)
        serialized = pickle.dumps(a)
        b = pickle.loads(serialized)
        self.assertTrue(isinstance(b, torch.nn.Parameter))
        self.assertEqual(a.requires_grad, b.requires_grad)
        self.assertEqual(a, b)

    def test_pickle_dtype(self):
        t = torch.float32
        serialized = pickle.dumps(t)
        b = pickle.loads(serialized)
        self.assertTrue(isinstance(b, torch.dtype))
        self.assertEqual(id(b), id(t))

    def test_pickle_size(self):
        a = torch.rand(10).size()
        serialized = pickle.dumps(a)
        b = pickle.loads(serialized)
        self.assertTrue(isinstance(b, torch.Size))
        self.assertEqual(a, b)

    def test_pickle_function(self):
        # https://github.com/pytorch/pytorch/issues/37703
        a = torch.tanh
        serialized = pickle.dumps(a)
        b = pickle.loads(serialized)
        self.assertEqual(a, b)

    def test_generator_cpu(self):
        # test default generators are equal
        self.assertEqual(torch.default_generator, torch.default_generator)

        # tests Generator API
        # manual_seed, seed, initial_seed, get_state, set_state
        g1 = torch.Generator()
        g2 = torch.Generator()
        g1.manual_seed(12345)
        g2.manual_seed(12345)
        self.assertEqual(g1.initial_seed(), g2.initial_seed())

        g1.seed()
        g2.seed()
        self.assertNotEqual(g1.initial_seed(), g2.initial_seed())

        g1 = torch.Generator()
        g2_state = g2.get_state()
        g2_randn = torch.randn(1, generator=g2)
        g1.set_state(g2_state)
        g1_randn = torch.randn(1, generator=g1)
        self.assertEqual(g1_randn, g2_randn)

        default_state = torch.default_generator.get_state()
        q = torch.empty(100)
        g1_normal = q.normal_()
        g2 = torch.Generator()
        g2.set_state(default_state)
        g2_normal = q.normal_(generator=g2)
        self.assertEqual(g1_normal, g2_normal)

    def test_invalid_generator_raises(self):
        self.assertRaises(RuntimeError, lambda: torch.Generator('opengl'))
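    # Illustrative sketch (not part of the original suite): a seeded local
    # Generator gives a reproducible stream without touching the global RNG
    # state. Assumes only torch.
    def test_generator_reproducibility_example(self):
        g = torch.Generator().manual_seed(0)
        first = torch.randn(3, generator=g)
        g.manual_seed(0)  # rewind the stream
        second = torch.randn(3, generator=g)
        self.assertEqual(first, second)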
    def _sobol_reference_samples(self, scramble: bool) -> torch.Tensor:
        if not scramble:
            # theoretical values from Joe Kuo 2010
            return torch.tensor(
                [
                    [0., 0.],
                    [0.5, 0.5],
                    [0.75, 0.25],
                    [0.25, 0.75],
                    [0.375, 0.375],
                    [0.875, 0.875],
                    [0.625, 0.125],
                    [0.125, 0.625],
                ],
            )
        else:
            # theoretical values unknown: convergence properties checked
            return torch.tensor(
                [
                    [0.50860737, 0.29320504],
                    [0.07116939, 0.89594537],
                    [0.49354145, 0.11524881],
                    [0.93097717, 0.70244044],
                    [0.87266153, 0.23887917],
                    [0.31021884, 0.57600391],
                    [0.13687253, 0.42054182],
                    [0.69931293, 0.77336788],
                ],
            )

    def test_sobolengine_bounds(self, scramble: bool = False):
        engine = torch.quasirandom.SobolEngine(100, scramble=scramble, seed=123456)
        sample = engine.draw(512)
        self.assertTrue(torch.all(sample >= 0))
        self.assertTrue(torch.all(sample <= 1))

    def test_sobolengine_bounds_scrambled(self):
        self.test_sobolengine_bounds(scramble=True)

    def test_sobolengine_draw(self, scramble: bool = False):
        ref_sample = self._sobol_reference_samples(scramble=scramble)
        engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
        sample = engine.draw(n=len(ref_sample))
        self.assertEqual(sample, ref_sample)
        self.assertEqual(engine.num_generated, len(ref_sample))

    def test_sobolengine_draw_scrambled(self):
        self.test_sobolengine_draw(scramble=True)

    def test_sobolengine_first_point(self):
        for dtype in (torch.float, torch.double):
            engine = torch.quasirandom.SobolEngine(2, scramble=False)
            sample = engine.draw(1, dtype=dtype)
            self.assertTrue(torch.all(sample == 0))
            self.assertEqual(sample.dtype, dtype)
        for dtype in (torch.float, torch.double):
            engine = torch.quasirandom.SobolEngine(2, scramble=True, seed=123456)
            sample = engine.draw(1, dtype=dtype)
            self.assertTrue(torch.all(sample != 0))
            self.assertEqual(sample.dtype, dtype)

    def test_sobolengine_continuing(self, scramble: bool = False):
        ref_sample = self._sobol_reference_samples(scramble=scramble)
        engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
        n_half = len(ref_sample) // 2
        _ = engine.draw(n=n_half)
        sample = engine.draw(n=n_half)
        torch.testing.assert_allclose(sample, ref_sample[n_half:])

    def test_sobolengine_continuing_scrambled(self):
        self.test_sobolengine_continuing(scramble=True)

    def test_sobolengine_reset(self, scramble: bool = False):
        ref_sample = self._sobol_reference_samples(scramble=scramble)
        engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
        _ = engine.draw(n=len(ref_sample) // 2)
        engine.reset()
        self.assertEqual(engine.num_generated, 0)
        sample = engine.draw(n=len(ref_sample))
        torch.testing.assert_allclose(sample, ref_sample)

    def test_sobolengine_reset_scrambled(self):
        self.test_sobolengine_reset(scramble=True)

    def test_sobolengine_fast_forward(self, scramble: bool = False):
        ref_sample = self._sobol_reference_samples(scramble=scramble)
        engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
        engine.fast_forward(4)
        sample = engine.draw(n=4)
        torch.testing.assert_allclose(sample, ref_sample[4:])
        # alternate fast forwarding with sampling
        engine.reset()
        even_draws = []
        for i in range(8):
            if i % 2 == 0:
                even_draws.append(engine.draw())
            else:
                engine.fast_forward(1)
        torch.testing.assert_allclose(
            ref_sample[[i for i in range(8) if i % 2 == 0]],
            np.concatenate(even_draws),
        )

    def test_sobolengine_fast_forward_scrambled(self):
        self.test_sobolengine_fast_forward(scramble=True)

    def test_sobolengine_distribution(self, scramble=False):
        d = 50
        engine = torch.quasirandom.SobolEngine(d, scramble=scramble, seed=123456)
        sample = engine.draw(1024)
        torch.testing.assert_allclose(
            torch.mean(sample, dim=0), torch.full((d,), 0.5), atol=2, rtol=2
        )
        torch.testing.assert_allclose(
            np.percentile(sample, 25, axis=0), np.repeat(0.25, d), atol=2, rtol=2
        )
        torch.testing.assert_allclose(
            np.percentile(sample, 75, axis=0), np.repeat(0.75, d), atol=2, rtol=2
        )

    def test_sobolengine_distribution_scrambled(self):
        self.test_sobolengine_distribution(scramble=True)

    def test_sobolengine_draw_base2(self, scramble=False):
        ref_sample = self._sobol_reference_samples(scramble=scramble)
        engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
        sample = engine.draw_base2(2)
        self.assertEqual(ref_sample[:4], sample)
        # resampling still having N=2**n
        sample = engine.draw_base2(2)
        self.assertEqual(ref_sample[4:8], sample)

    def test_sobolengine_draw_base2_scrambled(self):
        self.test_sobolengine_draw_base2(scramble=True)

    def test_sobolengine_raise(self):
        maxdim = torch.quasirandom.SobolEngine.MAXDIM
        with self.assertRaises(ValueError):
            torch.quasirandom.SobolEngine(maxdim + 1)

    def test_sobolengine_high_dim(self):
        engine = torch.quasirandom.SobolEngine(1111, scramble=False, seed=123456)
        samples1 = engine.draw()
        vals1, counts1 = torch.unique(samples1, return_counts=True)
        samples2 = engine.draw()
        vals2, counts2 = torch.unique(samples2, return_counts=True)
        self.assertEqual(vals1.item(), 0.0)
        self.assertEqual(counts1.item(), 1111)
        self.assertEqual(vals2.item(), 0.5)
        self.assertEqual(counts2.item(), 1111)
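    # Illustrative sketch (not part of the original suite): fast_forward(n)
    # skips points without generating them, so draw-after-skip matches slicing
    # a longer draw from a fresh engine. Assumes only torch.
    def test_sobolengine_fast_forward_example(self):
        full = torch.quasirandom.SobolEngine(2, scramble=False).draw(8)
        engine = torch.quasirandom.SobolEngine(2, scramble=False)
        engine.fast_forward(4)
        self.assertEqual(engine.draw(4), full[4:])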
    def test_parsing_int64(self):
        # accepts integer arguments
        x = torch.cumsum(torch.ones(5, 5), 0)
        self.assertEqual(x, torch.cumsum(torch.ones(5, 5), torch.tensor(0)))
        # doesn't accept floating point variables
        self.assertRaises(TypeError, lambda: torch.cumsum(torch.ones(5, 5), torch.tensor(0.)))

    def test_parsing_double(self):
        # accepts floating point and integer arguments
        x = torch.randn(2, 3)
        torch.isclose(x, x, 1, 1)
        self.assertTrue(torch.isclose(x, x, 1, 1).all())
        self.assertTrue(torch.isclose(x, x, 1.5, 1.).all())
        # accepts floating point and integer tensors
        self.assertTrue(torch.isclose(x, x, torch.tensor(1), torch.tensor(1)).all())
        self.assertTrue(torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1.)).all())
        # doesn't accept variables with requires_grad
        self.assertRaises(TypeError,
                          lambda: torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1., requires_grad=True)).all())

    def test_parsing_intlist(self):
        # parse with integer variables
        self.assertEqual(torch.Size([3, 4]), torch.ones((torch.tensor(3), torch.tensor(4))).shape)
        self.assertEqual(torch.Size([3, 4]), torch.ones(torch.tensor(3), torch.tensor(4)).shape)
        # parse with numpy integers
        self.assertEqual(torch.Size([3, 4]), torch.ones((np.array(3), np.int64(4))).shape)
        self.assertEqual(torch.Size([3, 4]), torch.ones(np.array(3), np.int64(4)).shape)
        self.assertEqual(torch.Size([3, 4]), torch.ones((np.int64(3), np.array(4))).shape)
        self.assertEqual(torch.Size([3, 4]), torch.ones(np.int64(3), np.array(4)).shape)

        # fail parse with float variables
        self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3.), torch.tensor(4))))
        # fail parse with numpy floats
        self.assertRaises(TypeError, lambda: torch.ones((np.float(3.), torch.tensor(4))))
        self.assertRaises(TypeError, lambda: torch.ones((np.array(3.), torch.tensor(4))))

        # fail parse with > 1 element variables
        self.assertRaises(TypeError, lambda: torch.ones(torch.tensor(3, 3)))
        self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3, 3))))
        self.assertRaises(TypeError, lambda: torch.ones(np.array(3, 3)))
        self.assertRaises(TypeError, lambda: torch.ones((np.array(3, 3))))

        # fail parse with additional positional args after intlist arg
        self.assertRaisesRegex(TypeError,
                               "received an invalid combination of arguments",
                               lambda: torch.LongTensor((6, 0), 1, 1, 0))
        self.assertRaisesRegex(TypeError,
                               "missing 1 required positional arguments",
                               lambda: torch.tensor().new_zeros((5, 5), 0))

    def test_half_tensor(self):
        devices = ["cpu"]
        if torch.cuda.is_available():
            devices.append("cuda")

        # contiguous tensor
        # non-contiguous tensor
        # dense non-overlapping tensor
        # non-dense non-overlapping sliced tensor
        # non-dense overlapping equal strides
        for device in devices:
            tset = (
                torch.randn(4, 3, 2, device=device, dtype=torch.float).contiguous(),
                torch.randn(4, 3, 2, device=device, dtype=torch.float).transpose(0, 1),
                torch.randn(4, 3, 2, device=device, dtype=torch.float),
                torch.randn(4, 3, 2, device=device, dtype=torch.float)[:, :, ::2],
                torch.empty_strided(
                    (4, 2, 3), (10, 3, 3), device=device, dtype=torch.float
                ).copy_(torch.rand((4, 2, 3), dtype=torch.float, device=device)),
            )

            for x in tset:
                self.assertEqual(x.half().float(), x, atol=1e-3, rtol=0)
                xh = x.half()
                with tempfile.NamedTemporaryFile() as f:
                    torch.save(xh, f)
                    f.seek(0)
                    xh2 = torch.load(f)
                    self.assertEqual(xh.float(), xh2.float())

    def test_from_buffer(self):
        a = bytearray([1, 2, 3, 4])
        self.assertEqual(torch.ByteStorage.from_buffer(a).tolist(), [1, 2, 3, 4])
        shorts = torch.ShortStorage.from_buffer(a, 'big')
        self.assertEqual(shorts.size(), 2)
        self.assertEqual(shorts.tolist(), [258, 772])
        ints = torch.IntStorage.from_buffer(a, 'little')
        self.assertEqual(ints.size(), 1)
        self.assertEqual(ints[0], 67305985)
        f = bytearray([0x40, 0x10, 0x00, 0x00])
        floats = torch.FloatStorage.from_buffer(f, 'big')
        self.assertEqual(floats.size(), 1)
        self.assertEqual(floats[0], 2.25)

        f = bytearray([0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x40])
        bools = torch.BoolStorage.from_buffer(f, 'big')
        self.assertEqual(bools.size(), 8)
        self.assertEqual(bools.tolist(), [False, True, True, True, True, True, True, True])
        self.assertEqual(bools.type(), 'torch.BoolStorage')

        f = bytearray(b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9')
        bools = torch.BoolStorage.from_buffer(f, 'big')
        self.assertEqual(bools.size(), 19)

        f = bytearray(b'\0x4A')
        bools = torch.BoolStorage.from_buffer(f, 'big')
        self.assertEqual(bools.size(), 4)
        self.assertEqual(bools.tolist(), [False, True, True, True])
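    # Illustrative sketch (not part of the original suite): cross-check the
    # byte-order handling above against int.from_bytes from the standard
    # library. Assumes only torch and builtins.
    def test_from_buffer_byteorder_example(self):
        raw = bytearray([1, 2])
        self.assertEqual(torch.ShortStorage.from_buffer(raw, 'big')[0],
                         int.from_bytes(raw, 'big'))      # 0x0102 == 258
        self.assertEqual(torch.ShortStorage.from_buffer(raw, 'little')[0],
                         int.from_bytes(raw, 'little'))   # 0x0201 == 513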
    def test_storage_casts(self):
        storage = torch.IntStorage([-1, 0, 1, 2, 3, 4])
        self.assertEqual(storage.size(), 6)
        self.assertEqual(storage.tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertEqual(storage.type(), 'torch.IntStorage')
        self.assertIs(storage.dtype, torch.int32)

        floatStorage = storage.float()
        self.assertEqual(floatStorage.size(), 6)
        self.assertEqual(floatStorage.tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertEqual(floatStorage.type(), 'torch.FloatStorage')
        self.assertEqual(floatStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertIs(floatStorage.dtype, torch.float32)

        halfStorage = storage.half()
        self.assertEqual(halfStorage.size(), 6)
        self.assertEqual(halfStorage.tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertEqual(halfStorage.type(), 'torch.HalfStorage')
        self.assertEqual(halfStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertIs(halfStorage.dtype, torch.float16)

        bfloat16Storage = storage.bfloat16()
        self.assertEqual(bfloat16Storage.size(), 6)
        self.assertEqual(bfloat16Storage.tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertEqual(bfloat16Storage.type(), 'torch.BFloat16Storage')
        self.assertEqual(bfloat16Storage.int().tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertIs(bfloat16Storage.dtype, torch.bfloat16)

        longStorage = storage.long()
        self.assertEqual(longStorage.size(), 6)
        self.assertEqual(longStorage.tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertEqual(longStorage.type(), 'torch.LongStorage')
        self.assertEqual(longStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertIs(longStorage.dtype, torch.int64)

        shortStorage = storage.short()
        self.assertEqual(shortStorage.size(), 6)
        self.assertEqual(shortStorage.tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertEqual(shortStorage.type(), 'torch.ShortStorage')
        self.assertEqual(shortStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertIs(shortStorage.dtype, torch.int16)

        doubleStorage = storage.double()
        self.assertEqual(doubleStorage.size(), 6)
        self.assertEqual(doubleStorage.tolist(), [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
        self.assertEqual(doubleStorage.type(), 'torch.DoubleStorage')
        self.assertEqual(doubleStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertIs(doubleStorage.dtype, torch.float64)

        charStorage = storage.char()
        self.assertEqual(charStorage.size(), 6)
        self.assertEqual(charStorage.tolist(), [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
        self.assertEqual(charStorage.type(), 'torch.CharStorage')
        self.assertEqual(charStorage.int().tolist(), [-1, 0, 1, 2, 3, 4])
        self.assertIs(charStorage.dtype, torch.int8)

        byteStorage = storage.byte()
        self.assertEqual(byteStorage.size(), 6)
        self.assertEqual(byteStorage.tolist(), [255, 0, 1, 2, 3, 4])
        self.assertEqual(byteStorage.type(), 'torch.ByteStorage')
        self.assertEqual(byteStorage.int().tolist(), [255, 0, 1, 2, 3, 4])
        self.assertIs(byteStorage.dtype, torch.uint8)

        boolStorage = storage.bool()
        self.assertEqual(boolStorage.size(), 6)
        self.assertEqual(boolStorage.tolist(), [True, False, True, True, True, True])
        self.assertEqual(boolStorage.type(), 'torch.BoolStorage')
        self.assertEqual(boolStorage.int().tolist(), [1, 0, 1, 1, 1, 1])
        self.assertIs(boolStorage.dtype, torch.bool)

        complexfloat_storage = torch.ComplexFloatStorage([-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j])
        self.assertEqual(complexfloat_storage.size(), 6)
        self.assertEqual(complexfloat_storage.tolist(), [-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j])
        self.assertEqual(complexfloat_storage.type(), 'torch.ComplexFloatStorage')
        self.assertIs(complexfloat_storage.dtype, torch.complex64)

        complexdouble_storage = complexfloat_storage.complex_double()
        self.assertEqual(complexdouble_storage.size(), 6)
        self.assertEqual(complexdouble_storage.tolist(), [-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j])
        self.assertEqual(complexdouble_storage.type(), 'torch.ComplexDoubleStorage')
        self.assertIs(complexdouble_storage.dtype, torch.complex128)
    def test_from_file(self):
        def assert_with_filename(filename):
            size = 10000
            s1 = torch.FloatStorage.from_file(filename, True, size)
            t1 = torch.FloatTensor(s1).copy_(torch.randn(size))

            # check mapping
            s2 = torch.FloatStorage.from_file(filename, True, size)
            t2 = torch.FloatTensor(s2)
            self.assertEqual(t1, t2, atol=0, rtol=0)

            # check changes to t1 from t2
            rnum = random.uniform(-1, 1)
            t1.fill_(rnum)
            self.assertEqual(t1, t2, atol=0, rtol=0)

            # check changes to t2 from t1
            rnum = random.uniform(-1, 1)
            t2.fill_(rnum)
            self.assertEqual(t1, t2, atol=0, rtol=0)

            # release the tensors
            del s1, t1, s2, t2

        with TemporaryFileName() as fname:
            assert_with_filename(fname)

        if IS_FILESYSTEM_UTF8_ENCODING:
            with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname:
                assert_with_filename(fname)

    def test_torch_from_file(self):
        def assert_with_filename(filename):
            size = 10000
            s1 = torch.from_file(filename, True, size, dtype=torch.float)
            t1 = torch.FloatTensor(s1).copy_(torch.randn(size))

            # check mapping
            s2 = torch.from_file(filename, True, size, dtype=torch.float)
            t2 = torch.FloatTensor(s2)
            self.assertEqual(t1, t2, atol=0, rtol=0)

            # check changes to t1 from t2
            rnum = random.uniform(-1, 1)
            t1.fill_(rnum)
            self.assertEqual(t1, t2, atol=0, rtol=0)

            # check changes to t2 from t1
            rnum = random.uniform(-1, 1)
            t2.fill_(rnum)
            self.assertEqual(t1, t2, atol=0, rtol=0)

            # release the tensors
            del s1, t1, s2, t2

        with TemporaryFileName() as fname:
            assert_with_filename(fname)

        if IS_FILESYSTEM_UTF8_ENCODING:
            with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname:
                assert_with_filename(fname)
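    # Illustrative sketch (not part of the original suite): with shared=True the
    # file is memory-mapped, so two mappings of the same file observe each
    # other's writes, mirroring the tests above. Assumes TemporaryFileName from
    # this file's test utilities.
    def test_from_file_shared_example(self):
        with TemporaryFileName() as fname:
            a = torch.from_file(fname, shared=True, size=4, dtype=torch.float)
            b = torch.from_file(fname, shared=True, size=4, dtype=torch.float)
            a.fill_(3.0)  # visible through the second mapping
            self.assertEqual(b, torch.full((4,), 3.0))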
    def test_print(self):
        default_type = torch.tensor([]).type()
        for t in torch._tensor_classes:
            if t == torch.HalfTensor:
                continue  # HalfTensor does not support fill
            if t.is_sparse:
                continue
            if t.is_cuda and not torch.cuda.is_available():
                continue
            obj = t(100, 100).fill_(1)
            obj.__repr__()
            str(obj)
        # test half tensor
        obj = torch.rand(100, 100, device='cpu').half()
        obj.__repr__()
        str(obj)
        for t in torch._storage_classes:
            if t == torch.BFloat16Storage:
                continue  # Fix once fill is enabled for bfloat16
            if t.is_cuda and not torch.cuda.is_available():
                continue
            if t == torch.BoolStorage or t == torch.cuda.BoolStorage:
                obj = t(100).fill_(True)
            else:
                obj = t(100).fill_(1)
            obj.__repr__()
            str(obj)

        # test complex tensor
        # complex tensor print uses two formatters, one for real values
        # and the other for imag values. this is consistent with numpy
        x = torch.tensor([2.3 + 4j, 7 + 6j])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([2.3000+4.j, 7.0000+6.j])''')

        # test scientific notation for complex tensors
        x = torch.tensor([1e28 + 2j, -1e-28j])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([1.0000e+28+2.0000e+00j, -0.0000e+00-1.0000e-28j])''')

        # test big integer
        x = torch.tensor(2341234123412341)
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor(2341234123412341)''')

        # test scientific notation
        x = torch.tensor([1e28, 1e-28])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([1.0000e+28, 1.0000e-28])''')

        # test scientific notation using set_printoptions
        x = torch.tensor([1e2, 1e-2])
        torch.set_printoptions(sci_mode=True)
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([1.0000e+02, 1.0000e-02])''')
        torch.set_printoptions(sci_mode=False)
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([ 100.0000,    0.0100])''')
        torch.set_printoptions(sci_mode=None)  # reset to the default value

        # test no leading space if all elements positive
        x = torch.tensor([1, 2])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([1, 2])''')

        # test for leading space if there are negative elements
        x = torch.tensor([1, -2])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([ 1, -2])''')

        # test inf and nan
        x = torch.tensor([4, inf, 1.5, -inf, 0, nan, 1])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([4.0000,    inf, 1.5000,   -inf, 0.0000,    nan, 1.0000])''')

        y = torch.tensor([4, inf, complex(1.5, inf), complex(-inf, 4), 0, complex(nan, inf), complex(3, nan)])
        self.assertEqual(y.__repr__(), str(y))
        expected_str = '''\
tensor([4.0000+0.j,    inf+0.j, 1.5000+infj,   -inf+4.j, 0.0000+0.j,    nan+infj,
        3.0000+nanj])'''
        self.assertExpectedInline(str(y), expected_str)

        # test dtype
        torch.set_default_dtype(torch.float)
        x = torch.tensor([1e-324, 1e-323, 1e-322, 1e307, 1e308, 1e309], dtype=torch.float64)
        self.assertEqual(x.__repr__(), str(x))
        expected_str = '''\
tensor([ 0.0000e+00, 9.8813e-324, 9.8813e-323, 1.0000e+307, 1.0000e+308,
                inf], dtype=torch.float64)'''
        self.assertExpectedInline(str(x), expected_str)

        # test changing default dtype
        torch.set_default_dtype(torch.float64)
        self.assertEqual(x.__repr__(), str(x))
        expected_str = '''\
tensor([ 0.0000e+00, 9.8813e-324, 9.8813e-323, 1.0000e+307, 1.0000e+308,
                inf])'''
        self.assertExpectedInline(str(x), expected_str)

        # test summary
        x = torch.zeros(10000)
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([0., 0., 0.,  ..., 0., 0., 0.])''')

        # test internal summary function
        x = torch.rand(1, 20, 5, 30)
        summary = torch._tensor_str.get_summarized_data(x)
        self.assertEqual(summary.shape, (1, 6, 5, 6))
        first_and_last = [0, 1, 2, -3, -2, -1]
        self.assertEqual(summary, x[:, first_and_last][..., first_and_last])

        # test device
        if torch.cuda.is_available():
            x = torch.tensor([123], device='cuda:0')
            self.assertEqual(x.__repr__(), str(x))
            self.assertExpectedInline(str(x), '''tensor([123], device='cuda:0')''')

            # test changing default to cuda
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            self.assertEqual(x.__repr__(), str(x))
            self.assertExpectedInline(str(x), '''tensor([123])''')

            # test printing a tensor on a different gpu than current one.
            if torch.cuda.device_count() >= 2:
                with torch.cuda.device(1):
                    self.assertEqual(x.__repr__(), str(x))
                    self.assertExpectedInline(str(x), '''tensor([123], device='cuda:0')''')

            # test printing cpu tensor when default device is cuda
            y = torch.tensor([123], device='cpu')
            self.assertEqual(y.__repr__(), str(y))
            self.assertExpectedInline(str(y), '''tensor([123], device='cpu')''')
        torch.set_default_tensor_type(default_type)

        # test integral floats and requires_grad
        x = torch.tensor([123.], requires_grad=True)
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([123.], requires_grad=True)''')

        # test non-contiguous print
        # sliced tensor should have > PRINT_OPTS.threshold elements
        x = torch.ones(100, 2, 2, 10)
        y = x.as_strided(size=(100, 2, 10), stride=(2 * 2 * 10, 2 * 10, 1))
        self.assertEqual(str(y), y.__repr__())
        expected_str = '''\
tensor([[[1., 1., 1.,  ..., 1., 1., 1.],
         [1., 1., 1.,  ..., 1., 1., 1.]],

        [[1., 1., 1.,  ..., 1., 1., 1.],
         [1., 1., 1.,  ..., 1., 1., 1.]],

        [[1., 1., 1.,  ..., 1., 1., 1.],
         [1., 1., 1.,  ..., 1., 1., 1.]],

        ...,

        [[1., 1., 1.,  ..., 1., 1., 1.],
         [1., 1., 1.,  ..., 1., 1., 1.]],

        [[1., 1., 1.,  ..., 1., 1., 1.],
         [1., 1., 1.,  ..., 1., 1., 1.]],

        [[1., 1., 1.,  ..., 1., 1., 1.],
         [1., 1., 1.,  ..., 1., 1., 1.]]])\
'''
        self.assertExpectedInline(str(y), expected_str)

        x = torch.ones(100, 2, 2, 10) * (1 + 1j)
        y = x.as_strided(size=(100, 2, 10), stride=(2 * 2 * 10, 2 * 10, 1))
        self.assertEqual(str(y), y.__repr__())
        expected_str = '''\
tensor([[[1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j],
         [1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j]],

        [[1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j],
         [1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j]],

        [[1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j],
         [1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j]],

        ...,

        [[1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j],
         [1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j]],

        [[1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j],
         [1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j]],

        [[1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j],
         [1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j]]])\
'''
        self.assertExpectedInline(str(y), expected_str)

        # test print 0-dim tensor: there's no 0-dim in Numpy, we match arrayprint style
        x = torch.tensor(0.00002)
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor(2.0000e-05)''')

        # test print boolean tensor
        x = torch.tensor([True])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([True])''')

        x = torch.tensor(True)
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor(True)''')

        # [Numpy] test print float in sci_mode when min < 0.0001.
        x = torch.tensor([0.00002])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([2.0000e-05])''')

        # [Numpy] test print complex in sci_mode when real_min < 0.0001 and (or) imag_min < 0.0001.
        x = torch.tensor([0.00002]) * (1 + 1j)
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([2.0000e-05+2.0000e-05j])''')

        # [Numpy] test print float in sci_mode when max > 1e8.
        # TODO: Pytorch uses fixed precision to print, while Numpy uses dragon4_scientific
        # to do automatic trimming and padding.
        x = torch.tensor([123456789.])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([1.2346e+08])''')

        # [Numpy] test print float in sci_mode when max / min > 1000.
        x = torch.tensor([0.01, 11])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([1.0000e-02, 1.1000e+01])''')

        # [Numpy] test print int max / min > 1000, no sci_mode
        x = torch.tensor([1, 1010])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([   1, 1010])''')

        # [Numpy] test print int > 1e8, no sci_mode
        x = torch.tensor([1000000000])  # 1e9
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([1000000000])''')

        # [Numpy] test printing float in int_mode
        x = torch.tensor([1., 1000.])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([   1., 1000.])''')

        # [Numpy] test printing float in int_mode in sci format when max / min > 1000.
        x = torch.tensor([1., 1010.])
        self.assertEqual(x.__repr__(), str(x))
        self.assertExpectedInline(str(x), '''tensor([1.0000e+00, 1.0100e+03])''')

    def test_sizeof(self) -> None:
        sizeof_empty = torch.randn(0).storage().__sizeof__()
        sizeof_10 = torch.randn(10).storage().__sizeof__()
        sizeof_100 = torch.randn(100).storage().__sizeof__()
        self.assertEqual((sizeof_100 - sizeof_empty) // (sizeof_10 - sizeof_empty), 10)
        self.assertEqual((sizeof_100 - sizeof_empty) % (sizeof_10 - sizeof_empty), 0)

        sizeof_empty = torch.randn(0).to(torch.uint8).storage().__sizeof__()
        sizeof_10 = torch.randn(10).to(torch.uint8).storage().__sizeof__()
        sizeof_100 = torch.randn(100).to(torch.uint8).storage().__sizeof__()
        self.assertEqual((sizeof_100 - sizeof_empty) // (sizeof_10 - sizeof_empty), 10)
        self.assertEqual((sizeof_100 - sizeof_empty) % (sizeof_10 - sizeof_empty), 0)

    def test_iter(self) -> None:
        x = torch.randn(5, 5)
        for i, sub in enumerate(x):
            self.assertEqual(sub, x[i])

        x = torch.tensor([])
        self.assertEqual(list(x), [])

    def test_assertEqual(self) -> None:
        x = torch.FloatTensor([0])
        self.assertEqual(x, 0)
        xv = torch.autograd.Variable(x)
        self.assertEqual(xv, 0)
        self.assertEqual(x, xv)
        self.assertEqual(xv, x)

        # Tests that setting atol or rtol without the other throws
        self.assertRaises(AssertionError,
                          lambda: self.assertEqual(x, xv, atol=4))
        self.assertRaises(AssertionError,
                          lambda: self.assertEqual(x, xv, rtol=4))

        self.assertRaisesRegex(TypeError, "takes from 3 to 4 positional arguments",
                               lambda: self.assertEqual(x, xv, "", 1.0))  # type: ignore[misc]

    def test_new(self) -> None:
        x = torch.autograd.Variable(torch.tensor([]))
        y = torch.autograd.Variable(torch.randn(4, 4))
        z = torch.autograd.Variable(torch.IntTensor([1, 2, 3]))
        self.assertEqual(x.new().shape, [0])
        self.assertEqual(x.new(), x)
        self.assertEqual(x.new(1, 2).shape, [1, 2])
        self.assertEqual(x.new(torch.Size([3, 4])).shape, [3, 4])
        self.assertEqual(x.new([3, 4]).shape, [2])
        self.assertEqual(x.new([3, 4]).tolist(), [3, 4])
        self.assertEqual(x.new((3, 4)).tolist(), [3, 4])
        self.assertEqual(x.new([np.int32(3), np.float64(4)]).tolist(), [3, 4])
        self.assertEqual(x.new(np.array((3, 4))).tolist(), [3, 4])
        self.assertEqual(x.new([z[2], z[0] + 3]).tolist(), [3, 4])
        self.assertEqual(x.new(size=(3, 4)).shape, [3, 4])
        self.assertEqual(x.new(()).shape, [0])
        self.assertEqual(x.new(y.storage()).data_ptr(), y.data_ptr())
        self.assertEqual(x.new(y).data_ptr(), y.data_ptr())
        self.assertIsNot(x.new(y), y)

        self.assertRaises(TypeError, lambda: x.new(z))
        # TypeError would be better
        self.assertRaises(RuntimeError, lambda: x.new(z.storage()))
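    # Illustrative sketch (not part of the original suite): the modern new_*
    # factory methods cover the common Tensor.new(...) patterns above while
    # keeping dtype and device explicit. Assumes only torch.
    def test_new_factory_example(self):
        x = torch.tensor([], dtype=torch.float64)
        z = x.new_zeros((2, 3))    # same dtype/device as x
        f = x.new_full((2,), 7.0)
        self.assertEqual(z.dtype, torch.float64)
        self.assertEqual(z, torch.zeros(2, 3, dtype=torch.float64))
        self.assertEqual(f, torch.tensor([7., 7.], dtype=torch.float64))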
    @unittest.skipIf(PYTORCH_CUDA_MEMCHECK, "is_pinned uses failure to detect pointer property")
    def test_pin_memory(self):
        x = torch.randn(3, 5)
        self.assertFalse(x.is_pinned())
        if not torch.cuda.is_available():
            self.assertRaises(RuntimeError, lambda: x.pin_memory())
        else:
            pinned = x.pin_memory()
            self.assertTrue(pinned.is_pinned())
            self.assertEqual(pinned, x)
            self.assertNotEqual(pinned.data_ptr(), x.data_ptr())
            # test that pin_memory on already pinned tensor has no effect
            self.assertIs(pinned, pinned.pin_memory())
            self.assertEqual(pinned.data_ptr(), pinned.pin_memory().data_ptr())

    def test_error_msg_type_translation(self):
        with self.assertRaisesRegex(
                RuntimeError,
                # message includes both Double and Long
                '(?=.*Double)(?=.*Long)'):

            # Calls model with a LongTensor input but DoubleTensor weights
            input = torch.zeros(1, 1, 1, 6, dtype=torch.long)
            weight = torch.nn.Parameter(torch.zeros(1, 1, 1, 3, dtype=torch.double))
            model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False)
            model.weight = weight
            out = model(input)

    def test_apply(self):
        x = torch.arange(1, 6)
        res = x.clone().apply_(lambda k: k + k)
        self.assertEqual(res, x * 2)
        self.assertRaises(TypeError, lambda: x.apply_(lambda k: "str"))

    def test_map(self):
        x = torch.autograd.Variable(torch.randn(3, 3))
        y = torch.autograd.Variable(torch.randn(3))
        res = x.clone()
        res.map_(y, lambda a, b: a + b)
        self.assertEqual(res, x + y)
        self.assertRaisesRegex(TypeError, "not callable", lambda: res.map_(y, "str"))

    def test_map2(self):
        x = torch.autograd.Variable(torch.randn(3, 3))
        y = torch.autograd.Variable(torch.randn(3))
        z = torch.autograd.Variable(torch.randn(1, 3))
        res = x.clone()
        res.map2_(y, z, lambda a, b, c: a + b * c)
        self.assertEqual(res, x + y * z)
        z.requires_grad = True
        self.assertRaisesRegex(
            RuntimeError, "requires grad",
            lambda: res.map2_(y, z, lambda a, b, c: a + b * c))
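    # Illustrative sketch (not part of the original suite): apply_/map_ run a
    # Python callable per element, so for real workloads the vectorized ops
    # checked above (x + y, x + y * z) are the idiomatic, much faster spelling.
    def test_apply_vs_vectorized_example(self):
        x = torch.arange(1., 6.)
        looped = x.clone().apply_(lambda v: v * 2 + 1)  # per-element Python loop
        self.assertEqual(looped, x * 2 + 1)             # same values, vectorized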
    def test_Size(self):
        x = torch.Size([1, 2, 3])
        self.assertIsInstance(x, tuple)
        self.assertEqual(x[0], 1)
        self.assertEqual(x[1], 2)
        self.assertEqual(x[2], 3)
        self.assertEqual(len(x), 3)
        self.assertRaises(TypeError, lambda: torch.Size(torch.ones(3)))

        self.assertIsInstance(x * 2, torch.Size)
        self.assertIsInstance(x[:-1], torch.Size)
        self.assertIsInstance(x + x, torch.Size)

    def test_Size_scalar(self):
        three = torch.tensor(3)
        two = torch.tensor(2)
        x = torch.Size([0, 1, two, three, 4])
        for i in range(1, 5):
            self.assertEqual(x[i], i)

    def test_Size_iter(self):
        for sizes in [iter([1, 2, 3, 4, 5]), range(1, 6)]:
            x = torch.Size(sizes)
            for i in range(0, 5):
                self.assertEqual(x[i], i + 1)

    def test_t_not_2d_error(self):
        self.assertRaises(RuntimeError, lambda: torch.randn(2, 3, 4).t())
        self.assertRaises(RuntimeError, lambda: torch.randn(2, 3, 4).t_())

    # skip this test for now as it affects all tests
    @unittest.skipIf(True, "flush_denormal not supported")
    def test_set_flush_denormal(self):
        tiny_float = 1e-42
        tiny_double = 1e-320
        float_tensor = torch.FloatTensor([1.0, tiny_float])
        double_tensor = torch.DoubleTensor([1.0, tiny_float, tiny_double])

        self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0)
        self.assertEqual(float_tensor[1], tiny_float, atol=tiny_float / 16, rtol=0)
        self.assertEqual(double_tensor[0], 1.0, atol=0.0, rtol=0)
        self.assertEqual(double_tensor[1], tiny_float, atol=0.0, rtol=0)
        self.assertEqual(double_tensor[2], tiny_double, atol=0.0, rtol=0)

        torch.set_flush_denormal(True)
        self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0)
        self.assertEqual(float_tensor[1], 0.0, atol=0.0, rtol=0)  # tiny_float to zero
        self.assertEqual(double_tensor[0], 1.0, atol=0.0, rtol=0)
        # tiny_float is not converted to zero in double type
        self.assertEqual(double_tensor[1], tiny_float, atol=0.0, rtol=0)
        self.assertEqual(double_tensor[2], 0.0, atol=0.0, rtol=0)  # tiny_double to zero
        torch.set_flush_denormal(False)

    def test_show_config(self):
        # We can't usefully test the output; just make sure this doesn't crash
        torch.__config__.show()

    @unittest.skipIf(IS_FBCODE, "CXX_FLAGS is only for OSS build.")
    def test_cxx_flags(self):
        torch.__config__._cxx_flags()

    def test_parallel_info(self):
        torch.__config__.parallel_info()

    @slowTest
    def test_slow_test(self):
        # Just a smoketest to make sure our slowTest decorator works.
        pass

    def test_is_nonzero(self):
        with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
            torch.tensor([]).is_nonzero()
        with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
            torch.tensor([0, 0]).is_nonzero()
        self.assertFalse(torch.tensor(0).is_nonzero())
        self.assertTrue(torch.tensor(1).is_nonzero())
        self.assertFalse(torch.tensor([0]).is_nonzero())
        self.assertTrue(torch.tensor([1]).is_nonzero())
        self.assertFalse(torch.tensor([[0]]).is_nonzero())
        self.assertTrue(torch.tensor([[1]]).is_nonzero())
        self.assertTrue(torch.tensor(0.1).is_nonzero())
        self.assertTrue(torch.tensor(-0.1).is_nonzero())
        self.assertFalse(torch.tensor(0.0).is_nonzero())
        self.assertTrue(torch.tensor(True).is_nonzero())
        self.assertFalse(torch.tensor(False).is_nonzero())
        self.assertFalse(torch.tensor(0 + 0j).is_nonzero())
        self.assertTrue(torch.tensor(0 + 0.1j).is_nonzero())

    def test_assert_async(self):
        with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
            torch._assert_async(torch.tensor([]))
        with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
            torch._assert_async(torch.tensor([0, 0]))
        with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
            torch._assert_async(torch.tensor(0))
        torch._assert_async(torch.tensor(1))
        torch._assert_async(torch.tensor(0.1))
        torch._assert_async(torch.tensor(-0.1))
        with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
            torch._assert_async(torch.tensor(0.0))
        torch._assert_async(torch.tensor(True))
        with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
            torch._assert_async(torch.tensor(False))
        torch._assert_async(torch.tensor(0 + 0.1j))
        with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
            torch._assert_async(torch.tensor(0 + 0j))

    # NB: we must not be built with CUDA; if we are built with CUDA but no CUDA
    # is available, we get a different error.
    @unittest.skipIf(torch.backends.cuda.is_built() or IS_SANDCASTLE, "CUDA is built, can't test CUDA not built error")
    def test_cuda_not_built(self):
        msg = "Torch not compiled with CUDA enabled"
        self.assertRaisesRegex(AssertionError, msg, lambda: torch.cuda.current_device())
        self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1], device="cuda"))
        self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1]).cuda())
        self.assertRaisesRegex(TypeError, msg, lambda: torch.cuda.FloatTensor())
        self.assertRaisesRegex(TypeError, msg, lambda: torch.set_default_tensor_type(torch.cuda.FloatTensor))
        self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1]).to(device="cuda"))

    def test_has_internal_overlap(self):
        OVERLAP_NO = 0
        OVERLAP_YES = 1
        OVERLAP_TOO_HARD = 2

        # Check for contiguous tensors
        a = torch.randn(3, 3)
        self.assertEqual(torch._debug_has_internal_overlap(a), OVERLAP_NO)

        # Checks for zero strides
        b = torch.randn(1, 3)
        b_expanded = b.expand(4, 3)
        self.assertEqual(torch._debug_has_internal_overlap(b_expanded), OVERLAP_YES)

        # Check for zero strided, size 1 axis, in non-contiguous storage (gh-33812)
        c = torch.randn(10).as_strided([2, 1, 5], [1, 0, 2])
        self.assertEqual(torch._debug_has_internal_overlap(c), OVERLAP_NO)
        c = torch.randn(2, 1, 10)[::2].as_strided((2, 1, 5), (10, 0, 2))
        self.assertEqual(torch._debug_has_internal_overlap(c), OVERLAP_TOO_HARD)

    def test_allow_tensor_metadata_change(self):
        def do_test(t):
            with self.assertRaisesRegex(
                    RuntimeError,
                    "set_sizes_contiguous is not allowed on a Tensor created from .data or .detach()"):
                t.resize_((2, 1))
            with self.assertRaisesRegex(
                    RuntimeError,
                    "set_storage is not allowed on a Tensor created from .data or .detach()"):
                t.set_()
            with self.assertRaisesRegex(
                    RuntimeError,
                    "set_storage_offset is not allowed on a Tensor created from .data or .detach()"):
                t.set_(t.storage(), 0, t.size(), list(t.stride()))

        do_test(torch.tensor([[1, 2]]).data)
        do_test(torch.tensor([[1, 2]]).detach())

    def test_c10_layer_norm(self):
        # test that we can call c10 ops and they return a reasonable result
        X = torch.rand(5, 5, dtype=torch.float)
        weight = torch.rand(*X.size()[1:], dtype=torch.float)
        bias = torch.rand(*X.size()[1:], dtype=torch.float)
        epsilon = 1e-4

        expected_norm = torch.nn.functional.layer_norm(
            X, X.size()[1:], weight=weight, bias=bias, eps=epsilon)
        actual_norm, actual_mean, actual_stdev = \
            torch.ops._caffe2.LayerNorm(torch.tensor(X), torch.tensor(
                weight), torch.tensor(bias), 1, epsilon, True)
        torch.testing.assert_allclose(expected_norm, actual_norm)

    def test_memory_format(self):
        def test_helper(x, memory_format):
            y = x.contiguous(memory_format=memory_format)
            self.assertFalse(y.is_contiguous())
            self.assertTrue(y.is_contiguous(memory_format=memory_format))
            self.assertEqual(y, x)

        test_helper(torch.randn(4, 3, 8, 8), torch.channels_last)
        test_helper(torch.randn(4, 3, 8, 8, 8), torch.channels_last_3d)

    def test_memory_format_contiguous_returns_same_tensor_if_already_satisfies(self):
        def test_helper(x, memory_format):
            alias = x.contiguous(memory_format=memory_format)
            alias.fill_(7)
            self.assertEqual(x, alias)

        test_helper(torch.randn(4, 8, 8, 3).permute(0, 3, 1, 2), torch.channels_last)
        test_helper(torch.randn(4, 8, 8, 8, 3).permute(0, 4, 1, 2, 3), torch.channels_last_3d)
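    # Illustrative sketch (not part of the original suite): channels_last keeps
    # the logical NCHW shape but lays the data out as NHWC, which is visible in
    # the strides. Assumes only torch.
    def test_channels_last_strides_example(self):
        x = torch.randn(2, 3, 4, 5).contiguous(memory_format=torch.channels_last)
        self.assertEqual(x.shape, torch.Size([2, 3, 4, 5]))  # shape is unchanged
        self.assertEqual(x.stride(), (60, 1, 15, 3))          # NHWC in memory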
    def test_memory_format_empty(self):
        def test_helper(dim1, dim2, memory_format):
            with self.assertRaises(RuntimeError):
                x = torch.empty(dim1, memory_format=memory_format)
            x = torch.empty(dim2, memory_format=memory_format)
            self.assertTrue(x.is_contiguous(memory_format=memory_format))

        test_helper((3, 3), (3, 3, 3, 3), torch.channels_last)
        test_helper((3, 3, 3), (3, 3, 3, 3, 3), torch.channels_last_3d)

    def test_subclass_tensors(self):
        # raise an error when trying to subclass FloatTensor
        with self.assertRaisesRegex(TypeError, "type 'torch.FloatTensor' is not an acceptable base type"):
            class Foo1(torch.FloatTensor):
                pass

        # but allow subclassing Tensor:
        class Foo2(torch.Tensor):
            def foo(self):
                return 5
        f = Foo2()
        self.assertEqual(f.foo(), 5)

    def test_ndim(self):
        a = torch.randn(1, 2, 3)
        self.assertEqual(3, a.ndim)
        b = torch.randn(())
        self.assertEqual(0, b.ndim)
        c = torch.randn(1, 0)
        self.assertEqual(2, c.ndim)

    def test_fill_diagonal(self):
        a1 = torch.randn(7, 3)
        a2 = a1.clone()
        v = 1
        for i in range(3):
            a2[i][i] = v
        a1.fill_diagonal_(v)
        self.assertEqual(a1, a2)

        b1 = torch.randn(7, 3)
        b2 = b1.clone()
        for i in range(3):
            b2[i][i] = v
            b2[i + 4][i] = v
        b1.fill_diagonal_(v, wrap=True)
        self.assertEqual(b1, b2)

        c1 = torch.rand(3, 3, 3)
        c2 = c1.clone()
        for i in range(3):
            c2[i][i][i] = v
        c1.fill_diagonal_(v)
        self.assertEqual(c1, c2)

        # non-contiguous tensor
        d1 = torch.rand(3, 3, 3)[:, 1, ...]
        d2 = d1.clone()
        for i in range(3):
            d2[i][i] = v
        d1.fill_diagonal_(v)
        self.assertEqual(d1, d2)

        e1 = torch.rand(7, 3, 3)[:, 1, ...]
        e2 = e1.clone()
        for i in range(3):
            e2[i][i] = v
            e2[i + 4][i] = v
        e1.fill_diagonal_(v, wrap=True)
        self.assertEqual(e1, e2)

    def test_batch_norm_cpu_inference(self):
        # input nchw in (2,1,1,1), (2,2,2,2)
        inputs = [
            torch.tensor([[[[-0.5000]]], [[[0.5000]]]]),
            torch.tensor([
                [
                    [[-0.5000, 0.5000], [-1.0000, 1.0000]],
                    [[-0.2500, -0.5000], [0.2500, 0.5000]]
                ],
                [
                    [[0.1000, 1.0000], [1.0000, 0.1000]],
                    [[1.0000, 0.5000], [1.5000, -1.5000]]
                ]])]
        # output nchw in (2,1,1,1), (2,2,2,2)
        outputs = [
            torch.tensor([
                [[[-0.499997496604919433593750000]]],
                [[[0.499997496604919433593750000]]]]),
            torch.tensor([
                [[[-0.499997496604919433593750000, 0.499997496604919433593750000],
                  [-0.999994993209838867187500000, 0.999994993209838867187500000]],
                 [[-0.249998748302459716796875000, -0.499997496604919433593750000],
                  [0.249998748302459716796875000, 0.499997496604919433593750000]]],
                [[[0.099999502301216125488281250, 0.999994993209838867187500000],
                  [0.999994993209838867187500000, 0.099999502301216125488281250]],
                 [[0.999994993209838867187500000, 0.499997496604919433593750000],
                  [1.499992489814758300781250000, -1.499992489814758300781250000]]]])]

        for i in range(len(inputs)):
            for affine in [False, True]:
                m = torch.nn.BatchNorm2d(inputs[i].size()[1], 1e-05, 0.1, affine=affine)
                m.eval()
                # contiguous case
                input1 = inputs[i].contiguous()
                output1 = m(input1)
                # non-contiguous case
                input2 = input1.permute(0, 1, 3, 2)
                output2 = m(input2).permute(0, 1, 3, 2)
                # channels last case
                input3 = input1.contiguous(memory_format=torch.channels_last)
                output3 = m(input3)
                self.assertEqual(output3, outputs[i])
                self.assertEqual(output3, output1)
                self.assertEqual(output3, output2)

    @noarchTest
    def test_empty_meta(self):
        x = torch.empty(2 ** 20, 2 ** 20, device='meta')
        y = torch.empty(2 ** 20, device='meta')
        z = x + y
        self.assertEqual(z.size(), (2 ** 20, 2 ** 20))
        self.assertRaises(RuntimeError, lambda: z[0][0].item())
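    # Illustrative sketch (not part of the original suite): meta tensors carry
    # only shape/dtype metadata, so shapes can be propagated through ops (with
    # broadcasting) without allocating any data. Assumes only torch and the
    # noarchTest decorator used above.
    @noarchTest
    def test_meta_shape_inference_example(self):
        a = torch.empty(8, 1, device='meta')
        b = torch.empty(1, 16, device='meta')
        c = a * b  # broadcasts on metadata alone
        self.assertEqual(c.size(), (8, 16))
        self.assertEqual(c.device.type, 'meta')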
    @noarchTest
    def test_upsample_nearest1d_meta(self):
        # TODO: this test should be triggered by test_nn.py but right
        # now meta is not enabled (and even if it was, we are probably
        # missing too many meta functions to get through the test unmolested)

        # NB: Can't make the exponent too big, or it will overflow
        # signed 64-bit integer
        x = torch.empty(2 * 10 ** 8, 3, 2 * 10 ** 8, device='meta')
        z = torch.nn.functional.interpolate(x, scale_factor=2)
        self.assertEqual(z.size(), (2 * 10 ** 8, 3, 4 * 10 ** 8))
        self.assertRaises(RuntimeError, lambda: z[0][0][0].item())

        # TODO: the out tests cannot be triggered by test_nn.py because
        # we don't actually do out= arguments for nn functions, so there
        # is no public API by which to get the out version

        # interpolate doesn't seem to support out=
        # (not sure why passing None here doesn't work? How strange...)
        z = torch.empty(0, device='meta')
        torch._C._nn.upsample_nearest1d(x, (4 * 10 ** 8,), 2, out=z)
        self.assertEqual(z.size(), (2 * 10 ** 8, 3, 4 * 10 ** 8))
        self.assertRaises(RuntimeError, lambda: z[0][0][0].item())

    @noarchTest
    def test_upsample_nearest2d_meta(self):
        # TODO: the out tests cannot be triggered by test_nn.py because
        # we don't actually do out= arguments for nn functions, so there
        # is no public API by which to get the out version

        # Make sure we don't clobber strides of out tensor.  NB: this
        # test must be done on 2d/3d, because 1d doesn't have any meaningful
        # layout support
        x = torch.empty(4, 3, 8, 8, device='meta')
        out = torch.empty(4, 3, 16, 16, device='meta', memory_format=torch.channels_last)
        torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))

        x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last)
        out = torch.empty(4, 3, 16, 16, device='meta')
        torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
        self.assertTrue(out.is_contiguous())

        # But if resize occurs, do clobber
        x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last)
        out = torch.empty(0, device='meta')
        torch._C._nn.upsample_nearest2d(x, (16, 16), out=out)
        self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))

        # Complain if out dtype mismatch
        x = torch.empty(4, 3, 8, 8, device='meta', dtype=torch.float)
        out = torch.empty(4, 3, 16, 16, device='meta', dtype=torch.double)
        self.assertExpectedRaisesInline(
            RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
            """Expected out tensor to have dtype float, but got double instead"""
        )

        # Complain if out device mismatch
        x = torch.empty(0, 3, 8, 8, device='meta')
        out = torch.empty(0, 3, 16, 16, device='cpu')
        self.assertExpectedRaisesInline(
            RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
            """Expected out tensor to have device meta, but got cpu instead"""
        )

    @noarchTest
    def test_detach_meta(self):
        x = torch.empty(2, device='meta')
        # This used to segfault
        self.assertRaises(RuntimeError, lambda: x.detach().storage())

    @noarchTest
    def test_add_meta_scalar(self):
        # From https://github.com/pytorch/pytorch/issues/53815
        x = torch.empty(2, device='meta')
        y = x + 2
        self.assertEqual(y.size(), x.size())
tensor2345).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(tensor2345, tensor2345_channels_last).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(tensor2345_non_contiguous, tensor2345_channels_last).size(), (2, 3, 4, 5)) # scalar case self.assertEqual(torch.normal(tensor2345, 2).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(2, tensor2345).size(), (2, 3, 4, 5)) # inputs are expandable tensors self.assertEqual(torch.normal(tensor2345, tensor1).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(tensor2145, tensor2345).size(), (2, 3, 4, 5)) # inputs are non-expandable tensors, but they have same number of elements # TORCH_WARN_ONCE is used in torch.normal, only 1st assertEqual will show warn msg if not warned: self.assertWarnsRegex(UserWarning, "deprecated and the support will be removed", lambda: self.assertEqual(torch.normal(tensor120, tensor2345).size(), (120,))) warned = True else: self.assertEqual(torch.normal(tensor120, tensor2345).size(), (120,)) self.assertEqual(torch.normal(tensor2345, tensor120).size(), (2, 3, 4, 5)) # inputs are non-expandable tensors and they don't have same number of elements with self.assertRaisesRegex(RuntimeError, "inconsistent tensor"): torch.normal(tensor2345, tensor4) # output and inputs are size compatible self.assertEqual(torch.normal(tensor2345, tensor2345, out=output2345).size(), (2, 3, 4, 5)) # output and inputs are not size compatible with self.assertRaisesRegex(RuntimeError, "inconsistent tensor"): # inputs are expandable but have different broadcasted size than output torch.normal(tensor2345, tensor2145, out=output345) with self.assertRaisesRegex(RuntimeError, "inconsistent tensor"): # inputs are not expandable but reshapeable, output size is not the same as mean torch.normal(tensor2345, tensor120, out=output345) def test_tensoriterator_output_setup(self): # Test whether the output's memory layout is correct def test_memory_layout(x, y, scale, zero_point, out): self.assertEqual(x.dim(), 4) self.assertEqual(x.size(), y.size()) self.assertEqual(y.size(), out.size()) shape = x.size() for n in range(shape[0]): for c in range(shape[1]): for h in range(shape[2]): for w in range(shape[3]): if scale is not None and zero_point is not None: self.assertEqual( out[n][c][h][w], torch.ops.quantized.add(x[n][c][h][w], y[n][c][h][w], scale, zero_point)) else: self.assertEqual(out[n][c][h][w], x[n][c][h][w] + y[n][c][h][w]) xraw = torch.rand(2, 3, 4, 4) yraw = torch.rand(2, 3, 4, 4) qxraw = torch.quantize_per_tensor(xraw, 0.1, 5, torch.quint8) qyraw = torch.quantize_per_tensor(yraw, 0.1, 5, torch.quint8) # contiguous case fast setup test_memory_layout(xraw, yraw, None, None, xraw + yraw) test_memory_layout(qxraw, qyraw, 0.1, 5, torch.ops.quantized.add(qxraw, qyraw, 0.1, 5)) # channels last case fast setup x = xraw.contiguous(memory_format=torch.channels_last) y = yraw.contiguous(memory_format=torch.channels_last) test_memory_layout(x, y, None, None, x + y) qx = qxraw.contiguous(memory_format=torch.channels_last) qy = qyraw.contiguous(memory_format=torch.channels_last) test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5)) # non contiguous case fast setup (dense, non-overlapping, same shape and strides) x = xraw.permute(0, 2, 3, 1) y = yraw.permute(0, 2, 3, 1) test_memory_layout(x, y, None, None, x + y) qx = qxraw.permute(0, 2, 3, 1) qy = qyraw.permute(0, 2, 3, 1) test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5)) # non contiguous case fast setup (dense, non-overlapping) # input tensors have same shape and 
strides # output tensor have same shape as input tensors but different stride # output tensor should preserve its strides in this case x = xraw.permute(0, 2, 3, 1) y = yraw.permute(0, 2, 3, 1) out = torch.empty_like(xraw) out = out.permute(0, 3, 2, 1) expected_stride = out.stride() test_memory_layout(x, y, None, None, torch.add(x, y, out=out)) self.assertEqual(expected_stride, out.stride()) # non contiguous case non fast setup x = xraw.permute(0, 2, 3, 1) y = yraw.permute(0, 3, 2, 1) test_memory_layout(x, y, None, None, x + y) qx = qxraw.permute(0, 2, 3, 1) qy = qyraw.permute(0, 3, 2, 1) test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5)) # Tests to make sure we still handle .data properly until it is removed def test_dot_data_use(self): # .data allows to change the Tensors types inplace, check that we still # raise a nice error. with self.assertRaisesRegex( RuntimeError, # message includes both Double and Long '(?=.*Double)(?=.*Long)'): # Calls model with a LongTensor input but DoubleTensor weights input = torch.randn(1, 1, 1, 6, dtype=torch.double) weight = torch.zeros(1, 1, 1, 3, dtype=torch.long) model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False) model.weight.data = weight out = model(input) # Functions to test negative dimension wrapping METHOD = 1 INPLACE_METHOD = 2 FUNCTIONAL = 4 DIM_ARG = None def make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim=0): def neg_dim_test(self): if isinstance(tensor_arg, list): assert METHOD not in types and INPLACE_METHOD not in types x = [torch.randn(arg) for arg in tensor_arg] ndim = len(tensor_arg[-1]) else: x = torch.randn(*tensor_arg) ndim = len(tensor_arg) ndim += extra_dim n_dim_to_test = sum(e is DIM_ARG for e in arg_constr()) for dims_val in combinations(range(ndim), n_dim_to_test): arg = arg_constr() arg_neg = copy.deepcopy(arg) idx = 0 for i, v in enumerate(arg): if v is DIM_ARG: arg[i] = dims_val[idx] arg_neg[i] = dims_val[idx] - ndim idx += 1 if METHOD in types: a = getattr(x, name)(*arg) b = getattr(x, name)(*arg_neg) self.assertEqual(a, b) if INPLACE_METHOD in types: a = x.clone() getattr(a, name + '_')(*arg) b = x.clone() getattr(b, name + '_')(*arg_neg) self.assertEqual(a, b) if FUNCTIONAL in types: a = getattr(torch, name)(x, *arg) b = getattr(torch, name)(x, *arg_neg) self.assertEqual(a, b) return neg_dim_test def idx_tensor(size, max_val): return torch.LongTensor(*size).random_(0, max_val - 1) def add_neg_dim_tests(): neg_dim_tests = [ ('narrow', (10, 20, 30), lambda: [DIM_ARG, 0, 5], [METHOD]), ('transpose', (10, 20, 30), lambda: [DIM_ARG, DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]), ('size', (10, 20, 30), lambda: [DIM_ARG], [METHOD]), ('cat', [(2, 3, 4), (2, 3, 4)], lambda: [DIM_ARG], [FUNCTIONAL]), ('chunk', (10, 20, 30), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]), ('gather', (10, 20), lambda: [DIM_ARG, idx_tensor((10, 20), 10)], [METHOD, FUNCTIONAL]), ('index_select', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10)], [METHOD, FUNCTIONAL]), ('split', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]), ('squeeze', (10, 1, 20, 1), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]), ('unbind', (2, 3, 4), lambda: [DIM_ARG], [FUNCTIONAL]), ('unsqueeze', (10, 20), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL], 1), ('logcumsumexp', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('cumprod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('cumsum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('cummax', (10, 20), lambda: [DIM_ARG], 
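    # Quick illustration (sketch) of the layout propagation the test above
    # checks: when both inputs share a memory format, TensorIterator's fast
    # setup gives the freshly-allocated output that same layout, e.g.:
    #   x = torch.rand(2, 3, 4, 4).contiguous(memory_format=torch.channels_last)
    #   y = torch.rand(2, 3, 4, 4).contiguous(memory_format=torch.channels_last)
    #   (x + y).is_contiguous(memory_format=torch.channels_last)  # True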
    # Tests to make sure we still handle .data properly until it is removed
    def test_dot_data_use(self):
        # .data allows to change the Tensors types inplace, check that we still
        # raise a nice error.
        with self.assertRaisesRegex(
                RuntimeError,
                # message includes both Double and Long
                '(?=.*Double)(?=.*Long)'):

            # Calls model with a LongTensor input but DoubleTensor weights
            input = torch.randn(1, 1, 1, 6, dtype=torch.double)
            weight = torch.zeros(1, 1, 1, 3, dtype=torch.long)
            model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False)
            model.weight.data = weight
            out = model(input)


# Functions to test negative dimension wrapping
METHOD = 1
INPLACE_METHOD = 2
FUNCTIONAL = 4
DIM_ARG = None

def make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim=0):
    def neg_dim_test(self):
        if isinstance(tensor_arg, list):
            assert METHOD not in types and INPLACE_METHOD not in types
            x = [torch.randn(arg) for arg in tensor_arg]
            ndim = len(tensor_arg[-1])
        else:
            x = torch.randn(*tensor_arg)
            ndim = len(tensor_arg)
        ndim += extra_dim

        n_dim_to_test = sum(e is DIM_ARG for e in arg_constr())

        for dims_val in combinations(range(ndim), n_dim_to_test):
            arg = arg_constr()
            arg_neg = copy.deepcopy(arg)
            idx = 0
            for i, v in enumerate(arg):
                if v is DIM_ARG:
                    arg[i] = dims_val[idx]
                    arg_neg[i] = dims_val[idx] - ndim
                    idx += 1

            if METHOD in types:
                a = getattr(x, name)(*arg)
                b = getattr(x, name)(*arg_neg)
                self.assertEqual(a, b)

            if INPLACE_METHOD in types:
                a = x.clone()
                getattr(a, name + '_')(*arg)
                b = x.clone()
                getattr(b, name + '_')(*arg_neg)
                self.assertEqual(a, b)

            if FUNCTIONAL in types:
                a = getattr(torch, name)(x, *arg)
                b = getattr(torch, name)(x, *arg_neg)
                self.assertEqual(a, b)

    return neg_dim_test

def idx_tensor(size, max_val):
    return torch.LongTensor(*size).random_(0, max_val - 1)

def add_neg_dim_tests():
    neg_dim_tests = [
        ('narrow', (10, 20, 30), lambda: [DIM_ARG, 0, 5], [METHOD]),
        ('transpose', (10, 20, 30), lambda: [DIM_ARG, DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
        ('size', (10, 20, 30), lambda: [DIM_ARG], [METHOD]),
        ('cat', [(2, 3, 4), (2, 3, 4)], lambda: [DIM_ARG], [FUNCTIONAL]),
        ('chunk', (10, 20, 30), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('gather', (10, 20), lambda: [DIM_ARG, idx_tensor((10, 20), 10)], [METHOD, FUNCTIONAL]),
        ('index_select', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10)], [METHOD, FUNCTIONAL]),
        ('split', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('squeeze', (10, 1, 20, 1), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
        ('unbind', (2, 3, 4), lambda: [DIM_ARG], [FUNCTIONAL]),
        ('unsqueeze', (10, 20), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL], 1),
        ('logcumsumexp', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cumprod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cumsum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cummax', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cummin', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('mean', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('median', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('nanmedian', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('mode', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('norm', (10, 20), lambda: [2, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('prod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('std', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('sum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('var', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('kthvalue', (10, 20), lambda: [3, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('max', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('min', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('sort', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('topk', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('renorm', (10, 20), lambda: [2, DIM_ARG, 1], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
        ('index_add', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
        ('index_copy', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
        ('index_fill', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), 12], [INPLACE_METHOD]),
        ('scatter', (10, 10), lambda: [DIM_ARG, idx_tensor((10, 10), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
        ('select', (10, 20), lambda: [DIM_ARG, 3], [METHOD]),
        ('unfold', (10, 20), lambda: [DIM_ARG, 5, 2], [METHOD]),
    ]

    for decl in neg_dim_tests:
        if len(decl) == 4:
            name, tensor_arg, arg_constr, types = decl
            extra_dim = 0
        elif len(decl) == 5:
            name, tensor_arg, arg_constr, types, extra_dim = decl

        test_name = 'test_' + name + '_neg_dim'

        assert not hasattr(AbstractTestCases._TestTorchMixin, test_name), "Duplicated test name: " + test_name
        setattr(AbstractTestCases._TestTorchMixin, test_name,
                make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim))

@contextlib.contextmanager
def torch_vital_set(value):
    stash = None
    if 'TORCH_VITAL' in os.environ:
        stash = os.environ['TORCH_VITAL']
    os.environ['TORCH_VITAL'] = value
    try:
        yield
    finally:
        if stash:
            os.environ['TORCH_VITAL'] = stash
        else:
            del os.environ['TORCH_VITAL']
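# Usage sketch for the helper above (illustrative only): the context manager
# swaps TORCH_VITAL in os.environ for the duration of the block and restores
# (or removes) it afterwards:
#   with torch_vital_set('ON'):
#       assert torch.vitals_enabled()
#   # TORCH_VITAL is back to its previous state here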
# Tests Vital Signs for Torch
class TestBasicVitalSigns(TestCase):
    def test_basic_vitals(self):
        with torch_vital_set(''):
            self.assertFalse(torch.vitals_enabled())
        with torch_vital_set('ON'):
            self.assertTrue(torch.vitals_enabled())

    def test_basic_vitals_read_write(self):
        with torch_vital_set('ON'):
            self.assertTrue(torch.vitals_enabled())
            # This tests the code path of setting a vital
            self.assertTrue(torch.set_vital('Dataloader', 'basic_unit_test', 'TEST_VALUE_STRING'))
            self.assertIn('TEST_VALUE_STRING', torch.read_vitals())
            self.assertIn('CUDA.used', torch.read_vitals())

class TestVitalSignsCuda(TestCase):
    @onlyCUDA
    def test_cuda_vitals_gpu_only(self, device):
        with torch_vital_set('ON'):
            self.assertIn('CUDA.used\t\t true', torch.read_vitals())

# Device-generic tests. Instantiated below and not run directly.
class TestTorchDeviceType(TestCase):
    exact_dtype = True

    # TODO: move all tensor creation to common ops
    def _rand_shape(self, dim, min_size, max_size):
        shape = []
        for i in range(dim):
            shape.append(random.randint(min_size, max_size))
        return tuple(shape)

    @onlyCPU
    def test_set_deterministic_deprecated_warning(self, device):
        with DeterministicGuard(torch.are_deterministic_algorithms_enabled()):
            # Calling set_deterministic throws a warning about deprecation once
            # per process but testing this is tricky here since we actually get
            # two warnings: one for the deprecated use of `set_deterministic`
            # and one for the 'beta' use of `use_deterministic_algorithms`.
            # The assertWarnsOnceRegex cannot handle two different warnings
            with warnings.catch_warnings(record=True) as ws:
                warnings.simplefilter("always")  # allow any warning to be raised
                prev = torch.is_warn_always_enabled()
                torch.set_warn_always(True)
                try:
                    torch.set_deterministic(True)
                finally:
                    torch.set_warn_always(prev)
                for w in ws:
                    txt = str(w.message)
                    assert ("torch.use_deterministic_algorithms is in beta" in txt or
                            "torch.set_deterministic is deprecated" in txt)

    @onlyCPU
    def test_is_deterministic_deprecated_warning(self, device):
        with DeterministicGuard(torch.are_deterministic_algorithms_enabled()):
            # Calling is_deterministic throws a warning about deprecation once per process
            with self.assertWarnsOnceRegex(UserWarning, "torch.is_deterministic is deprecated"):
                torch.is_deterministic()

    # Validates that mathematical constants are defined properly, as required by
    # the Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html)
    @onlyCPU
    def test_constants(self, device):
        self.assertIsInstance(torch.e, float)
        self.assertEqual(torch.e, math.e, atol=0, rtol=0)

        self.assertIsInstance(torch.pi, float)
        self.assertEqual(torch.pi, math.pi, atol=0, rtol=0)

        self.assertIsInstance(torch.nan, float)
        self.assertEqual(torch.nan, math.nan, equal_nan=True)

        self.assertIsInstance(torch.inf, float)
        self.assertEqual(torch.inf, math.inf)

    @dtypes(torch.float32, torch.complex64)
    def test_storage(self, device, dtype):
        v = torch.randn(3, 5, dtype=dtype, device=device)
        self.assertEqual(v.storage()[0], v[0][0])
        self.assertEqual(v.storage()[14], v[2][4])

    @dtypes(torch.float32, torch.complex64)
    def test_deepcopy(self, device, dtype):
        from copy import deepcopy
        a = torch.randn(5, 5, dtype=dtype, device=device)
        b = torch.randn(5, 5, dtype=dtype, device=device)
        c = a.view(25)
        q = [a, [a.storage(), b.storage()], b, c]
        w = deepcopy(q)
        self.assertEqual(w[0], q[0], atol=0, rtol=0)
        self.assertEqual(w[1][0], q[1][0], atol=0, rtol=0)
        self.assertEqual(w[1][1], q[1][1], atol=0, rtol=0)
        self.assertEqual(w[1], q[1], atol=0, rtol=0)
        self.assertEqual(w[2], q[2], atol=0, rtol=0)

        # Check that deepcopy preserves sharing
        w[0].add_(1)
        for i in range(a.numel()):
            self.assertEqual(w[1][0][i], q[1][0][i] + 1)
        self.assertEqual(w[3], c + 1)
        w[2].sub_(1)
        for i in range(a.numel()):
            self.assertEqual(w[1][1][i], q[1][1][i] - 1)

    @dtypes(torch.float32, torch.complex64)
    def test_deepcopy_scalar(self, device, dtype):
        from copy import deepcopy
        a = torch.tensor(5, dtype=dtype, device=device)
        self.assertEqual(a.size(), deepcopy(a).size())
        self.assertEqual(a, deepcopy(a))

    def check_internal_mem_overlap(self, inplace_op, num_inputs,
                                   dtype, device,
                                   expected_failure=False):
        if isinstance(inplace_op, str):
            inplace_op = getattr(torch.Tensor, inplace_op)
        input = torch.randn(1, dtype=dtype, device=device).expand(3, 3)
        inputs = [input] + [torch.randn_like(input) for i in range(num_inputs - 1)]
        if not expected_failure:
            with self.assertRaisesRegex(RuntimeError, 'single memory location'):
                inplace_op(*inputs)
        else:
            with self.assertRaises(AssertionError):
                with self.assertRaisesRegex(RuntimeError, 'single memory location'):
                    inplace_op(*inputs)

    def unary_check_input_output_mem_overlap(self, data, sz, op,
                                             expected_failure=False):
        def _test(op, output, input):
            output_exp = torch.empty_like(output)
            op(input, out=output_exp)
            self.assertEqual(op(input, out=output), output_exp, msg=op.__name__)

        # output is identical to input:
        _test(op, output=data[0:sz], input=data[0:sz])
        # output and input are independent:
        _test(op, output=data[0:sz], input=data[sz:2 * sz])
        # output partially overlaps with input:
        if not expected_failure:
            with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                _test(op, data[0:sz], data[1:sz + 1])
        else:
            with self.assertRaises(AssertionError):
                with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                    _test(op, data[0:sz], data[1:sz + 1])
        # output is transpose of input:
        length = int(math.sqrt(sz))
        input = data[:length**2].view([length, length])
        out = input.t()
        if not expected_failure:
            with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                _test(op, out, input)
        else:
            with self.assertRaises(AssertionError):
                with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                    _test(op, out, input)

    def ternary_check_input_output_mem_overlap(self, op, device,
                                               expected_failure=False):
        sz = 9
        data = torch.randn(2 * sz, device=device)
        other1 = torch.randn(sz, device=device)
        other2 = torch.randn(sz, device=device)

        self.unary_check_input_output_mem_overlap(
            data, sz, lambda input, out:
                op(input, other1.view(input.shape), other2.view(input.shape), out=out),
            expected_failure=expected_failure)

        self.unary_check_input_output_mem_overlap(
            data, sz, lambda input, out:
                op(other1.view(input.shape), input, other2.view(input.shape), out=out),
            expected_failure=expected_failure)

        self.unary_check_input_output_mem_overlap(
            data, sz, lambda input, out:
                op(other1.view(input.shape), other2.view(input.shape), input, out=out),
            expected_failure=expected_failure)
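    # Concrete instance (sketch) of the internal-overlap case the helpers above
    # target: expanding a 1-element tensor makes every element alias the same
    # storage location, so in-place ops must refuse it:
    #   t = torch.randn(1).expand(3, 3)  # 9 elements, one storage slot
    #   t.add_(1)  # RuntimeError: ... single memory location
    # Partial input/output overlap (e.g. out=data[0:9] with input=data[1:10])
    # is likewise rejected with 'unsupported operation'.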
    def _select_broadcastable_dims(self, dims_full=None):
        # select full dimensionality
        if dims_full is None:
            dims_full = []
            ndims = random.randint(1, 4)
            dims_full = [random.randint(1, 8) for _ in range(ndims)]
        else:
            ndims = len(dims_full)

        # select actual dimensions for ops:
        # larger: full ndims, individual sizes may be reduced
        # smaller: possibly reduced ndims, sizes may be reduced
        smaller_ndims = random.randint(1, ndims)
        dims_small = []
        dims_large = []
        for i in range(ndims - 1, -1, -1):
            j = random.randint(1, 3)
            if j == 1:  # no reduced singleton dimension
                ds = dims_full[i]
                dl = dims_full[i]
            elif j == 2:  # larger may have reduced singleton dimension
                ds = dims_full[i]
                dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]
            elif j == 3:  # smaller may have reduced singleton dimension
                ds = 1
                dl = dims_full[i]
            dims_large = [dl] + dims_large
            if len(dims_small) < smaller_ndims:
                dims_small = [ds] + dims_small
        return (dims_small, dims_large, dims_full)

    # collected tests of ops that used scalar_check in Declarations.cwrap for
    # correctness
    def test_scalar_check(self, device):
        zero_d = torch.randn((), device=device)
        one_d = torch.randn((1,), device=device)

        # remainder
        self.assertEqual((), torch.remainder(zero_d, zero_d).shape)
        self.assertEqual((), torch.remainder(zero_d, 2).shape)
        self.assertEqual((1,), torch.remainder(zero_d, one_d).shape)
        self.assertEqual((1,), torch.remainder(one_d, zero_d).shape)

        # fmod
        self.assertEqual((), torch.fmod(zero_d, zero_d).shape)
        self.assertEqual((), torch.fmod(zero_d, 2).shape)
        self.assertEqual((1,), torch.fmod(zero_d, one_d).shape)
        self.assertEqual((1,), torch.fmod(one_d, zero_d).shape)

        # exp, cos, cosh, tan, atan, tanh, erf, erfc, reciprocal
        self.assertEqual((), torch.exp(zero_d).shape)
        self.assertEqual((), torch.cos(zero_d).shape)
        self.assertEqual((), torch.cosh(zero_d).shape)
        self.assertEqual((), torch.tan(zero_d).shape)
        self.assertEqual((), torch.atan(zero_d).shape)
        self.assertEqual((), torch.acosh(zero_d).shape)
        self.assertEqual((), torch.asinh(zero_d).shape)
        self.assertEqual((), torch.atanh(zero_d).shape)
        self.assertEqual((), torch.tanh(zero_d).shape)
        self.assertEqual((), torch.erf(zero_d).shape)
        self.assertEqual((), torch.erfc(zero_d).shape)
        self.assertEqual((), torch.reciprocal(zero_d).shape)
        self.assertEqual((1,), torch.exp(one_d).shape)
        self.assertEqual((1,), torch.cos(one_d).shape)
        self.assertEqual((1,), torch.cosh(one_d).shape)
        self.assertEqual((1,), torch.tan(one_d).shape)
        self.assertEqual((1,), torch.atan(one_d).shape)
        self.assertEqual((1,), torch.acosh(one_d).shape)
        self.assertEqual((1,), torch.asinh(one_d).shape)
        self.assertEqual((1,), torch.atanh(one_d).shape)
        self.assertEqual((1,), torch.tanh(one_d).shape)
        self.assertEqual((1,), torch.erf(one_d).shape)
        self.assertEqual((1,), torch.erfc(one_d).shape)
        self.assertEqual((1,), torch.reciprocal(one_d).shape)

        # clamp
        self.assertEqual((), torch.clamp(zero_d, min=0, max=1).shape)
        self.assertEqual((), torch.clamp(zero_d, min=0).shape)
        self.assertEqual((), torch.clamp(zero_d, max=1).shape)
        self.assertEqual((1,), torch.clamp(one_d, min=0, max=1).shape)
        self.assertEqual((1,), torch.clamp(one_d, min=0).shape)
        self.assertEqual((1,), torch.clamp(one_d, max=1).shape)

        # cumsum, cumprod, cummax, cummin
        self.assertEqual((), torch.logcumsumexp(zero_d, 0).shape)
        self.assertEqual((), torch.cumsum(zero_d, 0).shape)
        self.assertEqual((), torch.cumprod(zero_d, 0).shape)
        self.assertEqual((), torch.cummax(zero_d, 0)[0].shape)
        self.assertEqual((), torch.cummin(zero_d, 0)[0].shape)

        # renorm
        self.assertRaises(RuntimeError, lambda: torch.renorm(zero_d, 0.5, 0, 1.0))

        # sort, topk
        self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, False)])
        self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, True)])
        self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, False)])
        self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, True)])

        # lstsq (gels)
        self.assertRaises(RuntimeError, lambda: torch.lstsq(zero_d, zero_d))

        # eig
        self.assertRaises(RuntimeError, lambda: torch.eig(zero_d, False))
        self.assertRaises(RuntimeError, lambda: torch.eig(zero_d, True))

        # this is only implemented on cpu
        if (torch.device(device).type == 'cpu'):
            self.assertRaises(RuntimeError, lambda: torch.ormqr(zero_d, zero_d, zero_d))

        # max, min
        self.assertEqual((), torch.max(zero_d, zero_d).shape)
        self.assertEqual((1,), torch.max(one_d, zero_d).shape)
        self.assertEqual((1,), torch.max(zero_d, one_d).shape)
        self.assertEqual((), torch.min(zero_d, zero_d).shape)
        self.assertEqual((1,), torch.min(one_d, zero_d).shape)
        self.assertEqual((1,), torch.min(zero_d, one_d).shape)

        # diag
        self.assertRaises(RuntimeError, lambda: torch.diag(zero_d))

        zero_d_int = torch.tensor(1, device=device)
        one_d_int = torch.tensor([1], device=device)

        # lshift, rshift
        self.assertEqual((), (zero_d_int >> zero_d_int).shape)
        self.assertEqual((), (zero_d_int >> 1).shape)
        self.assertEqual((1,), (one_d_int >> zero_d_int).shape)
        self.assertEqual((1,), (zero_d_int >> one_d_int).shape)
        self.assertEqual((1,), (one_d_int >> 1).shape)

        self.assertEqual((), (zero_d_int << zero_d_int).shape)
        self.assertEqual((), (zero_d_int << 1).shape)
        self.assertEqual((1,), (one_d_int << zero_d_int).shape)
        self.assertEqual((1,), (zero_d_int << one_d_int).shape)
        self.assertEqual((1,), (one_d_int << 1).shape)

        # or
        self.assertEqual((), (zero_d_int | zero_d_int).shape)
        self.assertEqual((), (zero_d_int | 1).shape)
        self.assertEqual((1,), (one_d_int | zero_d_int).shape)
        self.assertEqual((1,), (zero_d_int | one_d_int).shape)
        self.assertEqual((1,), (one_d_int | 1).shape)

        # and
        self.assertEqual((), (zero_d_int & zero_d_int).shape)
        self.assertEqual((), (zero_d_int & 1).shape)
        self.assertEqual((1,), (one_d_int & zero_d_int).shape)
        self.assertEqual((1,), (zero_d_int & one_d_int).shape)
        self.assertEqual((1,), (one_d_int & 1).shape)

        # clone
        self.assertEqual((), zero_d.clone().shape)

        zero_d_bool = torch.tensor(True, device=device)
        one_d_bool = torch.tensor([True], device=device)

        # masked_select
        self.assertEqual((1,), torch.masked_select(zero_d_bool, zero_d_bool).shape)
        self.assertEqual((1,), torch.masked_select(zero_d_bool, one_d_bool).shape)
        self.assertEqual((1,), torch.masked_select(one_d_bool, zero_d_bool).shape)

        zero_d_uint8 = torch.tensor(1, dtype=torch.uint8, device=device)
        one_d_uint8 = torch.tensor([1], dtype=torch.uint8, device=device)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.assertEqual((1,), torch.masked_select(zero_d_uint8, zero_d_uint8).shape)
            self.assertEqual((1,), torch.masked_select(zero_d_uint8, one_d_uint8).shape)
            self.assertEqual((1,), torch.masked_select(one_d_uint8, zero_d_uint8).shape)

        # mode
        self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=False)])
        self.assertEqual([(1,), (1,)], [x.shape for x in torch.mode(one_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.mode(one_d, dim=0, keepdim=False)])

        # max
        self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=False)])
        self.assertEqual([(1,), (1,)], [x.shape for x in torch.max(one_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.max(one_d, dim=0, keepdim=False)])

        # amax
        self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=True).shape)
        self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=False).shape)
        self.assertEqual((1,), torch.amax(one_d, dim=0, keepdim=True).shape)
        self.assertEqual((), torch.amax(one_d, dim=0, keepdim=False).shape)

        # min
        self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=False)])
        self.assertEqual([(1,), (1,)], [x.shape for x in torch.min(one_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.min(one_d, dim=0, keepdim=False)])

        # amin
        self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=True).shape)
        self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=False).shape)
        self.assertEqual((1,), torch.amin(one_d, dim=0, keepdim=True).shape)
        self.assertEqual((), torch.amin(one_d, dim=0, keepdim=False).shape)

        # set_
        zero_d_clone = zero_d.clone()
        one_d_clone = one_d.clone()
        self.assertEqual((), zero_d_clone.set_(one_d.storage(), 0, (), ()).shape)
        self.assertEqual((1,), zero_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape)
        self.assertEqual((), one_d_clone.set_(one_d.storage(), 0, (), ()).shape)
        self.assertEqual((1,), one_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape)

        self.assertEqual((), zero_d.clone().set_(zero_d).shape)
        self.assertEqual((), one_d.clone().set_(zero_d).shape)
        self.assertEqual((1,), zero_d.clone().set_(one_d).shape)
        self.assertEqual((1,), one_d.clone().set_(one_d).shape)

        # take
        self.assertEqual((), torch.randn((2, 3), device=device).take(zero_d_int).shape)
        self.assertEqual((1,), torch.randn((2, 3), device=device).take(one_d_int).shape)

        # gather
        self.assertEqual((), torch.gather(zero_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape)
        self.assertEqual((1,), torch.gather(zero_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape)
        self.assertEqual((), torch.gather(one_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape)
        self.assertEqual((1,), torch.gather(one_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape)

        # normal
        # std must be >= 0
        zero_d_ge_0 = torch.rand((), device=device)
        # documentation says out shape matches shape of mean
        self.assertEqual((), torch.normal(zero_d, zero_d_ge_0).shape)
        self.assertEqual((1,), torch.normal(one_d, zero_d_ge_0).shape)
        self.assertEqual((), torch.normal(1, zero_d_ge_0).shape)
        self.assertEqual((), torch.normal(zero_d, 1).shape)
        self.assertEqual((1,), torch.normal(one_d, 1).shape)
        # TODO: this behavior differs on CPU and GPU, see https://github.com/pytorch/pytorch/issues/30480.
        # self.assertEqual((), torch.normal(zero_d, one_d).shape)
        # self.assertEqual((), torch.normal(1, one_d).shape)

        # convolutions.  Yes, we are testing nn.functional here; seems justified
        # given it's similar to the other tests
        w = torch.randn(2, 1, 3, 3, device=device).div_(2).requires_grad_()
        self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=1))
        self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=2))

        # nll_loss -- verify input can't be 0-dimensional.
        self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, zero_d, reduction='none'))
        self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, one_d, reduction='none'))
        # verify output is 0-dimensional when reduction != 'none'
        for (input, target) in ((torch.randn(1, 1, device=device), torch.tensor([0], device=device)),
                                (torch.randn(1, 1, 1, 1, device=device), torch.tensor([[[0]]], device=device))):
            self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='mean').shape)
            self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='sum').shape)

        # multilabel_margin_loss
        for input in (zero_d, one_d, torch.randn(1, 1, device=device)):
            for target in (torch.tensor(0, device=device), torch.tensor([0], device=device),
                           torch.tensor([[0]], device=device)):
                if (input.dim() <= 1 and target.dim() <= 1) or (input.dim() == 2 and target.dim() == 2):
                    output_shape = (target.shape[0],) if target.dim() == 2 else ()
                    self.assertEqual(output_shape,
                                     torch.nn.functional.multilabel_margin_loss(input, target, reduction='none').shape)
                    self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean').shape)
                    self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum').shape)
                else:
                    self.assertRaises(RuntimeError,
                                      lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='none'))
                    self.assertRaises(RuntimeError,
                                      lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean'))
                    self.assertRaises(RuntimeError,
                                      lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum'))

        # multi_margin_loss
        for input in (zero_d, one_d, torch.randn(1, 1, device=device)):
            for target in (torch.tensor(0, device=device), torch.tensor([0], device=device)):
                self.assertEqual(target.shape, torch.nn.functional.multi_margin_loss(input, target, reduction='none').shape)
                self.assertEqual((), torch.nn.functional.multi_margin_loss(input, target, reduction='mean').shape)
                self.assertEqual((), torch.nn.functional.multi_margin_loss(input, target, reduction='sum').shape)
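    # The conventions exercised above, in brief (illustrative sketch):
    #   zero_d = torch.randn(())    # 0-dim tensor, shape ()
    #   one_d = torch.randn((1,))   # 1-dim tensor, shape (1,)
    #   torch.exp(zero_d).shape         # () -- elementwise ops keep 0-dim
    #   torch.max(one_d, zero_d).shape  # (1,) -- broadcasting promotes to 1-dim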
    # Uses mismatched arange out size to trigger a warning
    def test_cpp_warnings_have_python_context(self, device):
        # Creates long string in advance to avoid a too-long Python line
        s = ".+Triggered internally at.+RangeFactories.+"

        def cpp_warn_fn():
            out = torch.empty((5,))
            torch.arange(0, 3, out=out)
            return out

        # Checks eager-mode cpp warning
        with warnings.catch_warnings(record=True) as w:
            cpp_warn_fn()
            frameinfo = inspect.getframeinfo(inspect.currentframe())
            warning = w[0]

            # Checks for cpp context in the warning message
            self.assertTrue(re.search(s, str(warning.message)) is not None)

            # Checks the Python features of the warning
            # Note: the eager mode warning refers to the line in the function
            # that throws the warning.
            self.assertEqual(frameinfo.lineno - 6, warning.lineno)
            self.assertEqual(len(w), 1)

        # Checks jitted cpp warning
        with warnings.catch_warnings(record=True) as w:
            scripted_cpp_warn_fn = torch.jit.script(cpp_warn_fn)
            scripted_cpp_warn_fn()
            warning = w[0]

            # Checks for cpp context in the warning message
            self.assertTrue(re.search(s, str(warning.message)) is not None)

            # Checks the Python features of the warning
            # Note: the jitted warning's lineno refers to the call to the jitted
            # function, which in our test suite has a layer of indirection
            # that makes checking the Python lineno fragile
            self.assertEqual(len(w), 1)

        # Checks jitted Python warning
        def warn_fn():
            warnings.warn("Warning!")

        # The jit mimics an eager-mode Python warning in this case
        with warnings.catch_warnings(record=True) as w:
            scripted_warn_fn = torch.jit.script(warn_fn)
            scripted_warn_fn()
            frameinfo = inspect.getframeinfo(inspect.currentframe())
            warning = w[0]

            self.assertTrue(re.search('Warning!', str(warning.message)) is not None)

            # Checks the Python features of the warning
            self.assertEqual(frameinfo.lineno - 6, warning.lineno)
            self.assertEqual(len(w), 1)
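    # Background for the next test (illustrative sketch): TORCH_WARN_ONCE
    # warnings normally fire once per process, but torch.set_warn_always(True)
    # -- which assertWarnsOnceRegex enables internally -- turns them into
    # ordinary warnings that fire on every call, e.g.:
    #   torch.set_warn_always(True)
    #   a = np.arange(3); a.flags.writeable = False
    #   torch.from_numpy(a)  # warns on every call now, not just the first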
    @onlyCPU
    def test_warn_always_caught(self, device):
        # Check that we can catch a TORCH_WARN_ONCE warning twice
        # since assertWarnsOnceRegex uses set_warn_always(True) which changes
        # TORCH_WARN_ONCE to TORCH_WARN
        a = np.arange(10)
        a.flags.writeable = False
        with self.assertWarnsOnceRegex(UserWarning, '.*non-writeable.*'):
            torch.from_numpy(a)

        # OK, got it once, now try again
        with self.assertWarnsOnceRegex(UserWarning, '.*non-writeable.*'):
            torch.from_numpy(a)

        # Make sure emitting two warnings will pass the assertWarnsOnceRegex
        # context manager
        with self.assertWarnsOnceRegex(UserWarning, '.*non-writeable.*'):
            torch.from_numpy(a)
            torch.from_numpy(a)

    # TODO: this test should be in test_nn.py
    def test_conv_transposed_backward_agnostic_to_memory_format(self, device):
        in_channels = 64
        out_channels = 128
        scale_factor = 8
        batch_size = 8
        length = 16

        conv = torch.nn.ConvTranspose1d(
            in_channels, out_channels, kernel_size=scale_factor * 2, stride=scale_factor).to(device)
        layer_norm = torch.nn.LayerNorm(out_channels).to(device)

        input_ = torch.randn(batch_size, in_channels, length).to(device).contiguous()
        input_ = conv(input_).contiguous()
        input_ = layer_norm(input_.transpose(1, 2).contiguous()).contiguous()
        input_.sum().backward()

    # TODO: this test should be in test_nn.py
    @onlyCUDA
    @largeTensorTest('12GB')
    def test_conv_transposed_large(self, device):
        # ConvTranspose3d works for large input tensors (gh-32866)
        in_channels = 64
        out_channels = 128
        kernel_size = 5

        conv = torch.nn.ConvTranspose3d(
            in_channels, out_channels, kernel_size=kernel_size,
            stride=2, padding=2, output_padding=1).to(device)

        x = torch.rand([1, 64, 8, 128, 172]).to(device)
        y = conv(x)

    def test_is_set_to(self, device):
        t1 = torch.empty(3, 4, 9, 10, device=device)
        t2 = torch.empty(3, 4, 9, 10, device=device)
        t3 = torch.tensor([], device=device).set_(t1)
        t4 = t3.clone().resize_(12, 90)
        self.assertFalse(t1.is_set_to(t2))
        self.assertTrue(t1.is_set_to(t3))
        self.assertTrue(t3.is_set_to(t1), "is_set_to should be symmetric")
        self.assertFalse(t1.is_set_to(t4))
        self.assertFalse(torch.tensor([]).is_set_to(torch.tensor([])),
                         "Tensors with no storages should not appear to be set "
                         "to each other")

        t1 = torch.tensor([True, True], dtype=torch.bool, device=device)
        t2 = torch.tensor([0], dtype=torch.bool, device=device).set_(t1)
        self.assertTrue(t1.is_set_to(t2))

        # test that sizes must match
        t1 = torch.empty([2, 3, 4], device=device)
        t2 = t1.view(4, 3, 2)
        self.assertFalse(t1.is_set_to(t2))
        self.assertFalse(t2.is_set_to(t1))

        # test that legacy empty size behavior used to be respected (i.e. all
        # empty tensors were logically collapsed to size [0]).
        t1 = torch.empty([2, 5, 0], device=device)
        t2 = t1.view([0])
        self.assertFalse(t1.is_set_to(t2))
        self.assertFalse(t2.is_set_to(t1))

    def test_broadcast(self, device):

        # all functions
        fns = {
            "dist", "atan2", "pow", "lerp", "add",
            "sub", "mul", "div", "fmod", "remainder",
            "eq", "ge", "gt", "le", "lt", "max", "min", "ne",
            "addcdiv", "addcmul", "masked_scatter", "masked_select", "masked_fill",
            "map", "map2", "copy"
        }
        # functions with three tensor arguments
        fns_3_args = {"map2"}
        fns_value_kwarg = {"addcdiv", "addcmul"}

        for fn in fns:
            (dims_small, dims_large, dims_full) = self._select_broadcastable_dims()
            full1d = torch.randn(*dims_full, device=device).flatten().float()
            small = torch.randn(*dims_small, device=device).float()
            large = torch.randn(*dims_large, device=device).float()
            small_expanded = small.expand(*dims_full)
            large_expanded = large.expand(*dims_full)
            small2 = None
            small2_expanded = None
            if fn in fns_3_args or fn in fns_value_kwarg:
                # create another smaller tensor
                (dims_small2, _, _) = self._select_broadcastable_dims(dims_full)
                small2 = torch.randn(*dims_small2, device=device).float()
                small2_expanded = small2.expand(*dims_full)
            if small.is_cuda and fn in ['map', 'map2']:
                # map and map2 are not implemented on CUDA tensors
                continue

            if hasattr(large_expanded, fn):
                # run through tensor versions of functions
                # and verify fully expanded inputs give same results
                expanded = {large: large_expanded, small: small_expanded, small2: small2_expanded}

                def tensorfn(myfn, t1, t2):
                    if fn == "lerp":
                        return myfn(t1, 0.5)
                    elif fn == "masked_select":
                        return myfn(t1 < 0)
                    elif fn == "masked_scatter":
                        return myfn(t1 < 0.5, full1d)
                    elif fn == "masked_fill":
                        return myfn(t1 < 0.5, 1.0)
                    elif fn in fns_3_args:
                        return myfn(1, t1, t2)
                    elif fn in fns_value_kwarg:
                        return myfn(t1, t2, value=1)
                    else:
                        return myfn(t1)

                # test various orders
                for first, second, third in [(large, small, small2), (small, large, small2),
                                             (small2, small, large), (small2, large, small)]:
                    if first is None:
                        break  # ignore last iter when small2 is None
                    method_expanded = getattr(expanded[first], fn)
                    method = getattr(first, fn)
                    r1 = tensorfn(method_expanded, expanded[second], expanded[third])
                    r2 = tensorfn(method, second, third)
                    self.assertEqual(r1, r2)

            # now for torch. versions of functions
            if hasattr(torch, fn):
                fntorch = getattr(torch, fn)
                expanded = {large: large_expanded, small: small_expanded, small2: small2_expanded}

                def torchfn(t1, t2, t3):
                    if fn == "lerp":
                        return fntorch(t1, t2, 0.5)
                    elif fn == "masked_select":
                        return fntorch(t1, t2 < 0)
                    elif fn == "masked_scatter":
                        return fntorch(t1, t2 < 0.5, full1d)
                    elif fn == "masked_fill":
                        return fntorch(t1, t2 < 0.5, 1.0)
                    elif fn in fns_3_args:
                        return fntorch(t1, 1.0, t2, t3)
                    elif fn in fns_value_kwarg:
                        return fntorch(t1, t2, t3, value=1.0)
                    else:
                        return fntorch(t1, t2)

                # test various orders
                for first, second, third in [(large, small, small2), (small, large, small2),
                                             (small2, small, large), (small2, large, small)]:
                    if first is None:
                        break  # ignore last iter when small2 is None
                    r1 = torchfn(expanded[first], expanded[second], expanded[third])
                    r2 = torchfn(first, second, third)
                    self.assertEqual(r1, r2)

            # now for in place functions
            # in-place tensor is not broadcastable; test only guaranteed
            # to work by broadcasting other argument(s)
            if not hasattr(large_expanded, fn + "_"):
                continue

            # need to clone largeExpanded so we can reuse, since functions are in-place
            large_expanded_clone = large_expanded.clone()

            def tensorfn_inplace(t0, t1, t2=None):
                t0_fn = getattr(t0, fn + "_")
                if fn == "lerp":
                    return t0_fn(t1, 0.5)
                elif fn == "masked_scatter":
                    return t0_fn(t1 < 0.5, full1d)
                elif fn == "masked_fill":
                    return t0_fn(t1 < 0.5, 1.0)
                elif fn == "map":
                    return t0_fn(t1, lambda x, y: x + y)
                elif fn == "map2":
                    return t0_fn(t1, t2, lambda x, y, z: x + y + z)
                elif fn in fns_3_args:
                    return t0_fn(1.0, t1, t2)
                elif fn in fns_value_kwarg:
                    return t0_fn(t1, t2, value=1.0)
                else:
                    return t0_fn(t1)

            # in-place pointwise operations don't actually work if the in-place
            # tensor is 0-strided (numpy has the same issue)
            if (0 not in large_expanded.stride() and 0 not in large_expanded_clone.stride()):
                r1 = tensorfn_inplace(large_expanded, small_expanded, small2_expanded)
                r2 = tensorfn_inplace(large_expanded_clone, small, small2)
                self.assertEqual(r1, r2)

            def broadcastable(t0, t1, t2=None):
                try:
                    t1.expand_as(t0)
                    if t2 is not None:
                        t2.expand_as(t0)
                except RuntimeError:
                    return False
                return True

            def _test_in_place_broadcastable(t0, t1, t2=None):
                if not broadcastable(t0, t1, t2):
                    same_size = t0.numel() == t1.numel() and (t0.numel() == t2.numel() if t2 is not None else True)
                    if not same_size:
                        self.assertRaises(RuntimeError, lambda: tensorfn_inplace(t0, t1, t2))
                else:
                    tensorfn_inplace(t0, t1, t2)

            if fn not in fns_3_args and fn not in fns_value_kwarg:
                _test_in_place_broadcastable(small, large_expanded)
                _test_in_place_broadcastable(small, large)
            else:
                _test_in_place_broadcastable(small2, small_expanded, large_expanded)
                _test_in_place_broadcastable(small2, small, large)

    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
    @onlyCUDA
    @wrapDeterministicFlagAPITest
    def test_cublas_config_nondeterministic_alert(self, device):
        test_cases = [
            # (function, (tensor sizes))
            ('mm', ((2, 2), (2, 2),)),
            ('mv', ((2, 2), (2,),)),
            ('bmm', ((1, 2, 2), (1, 2, 2),))]

        test_configs = [
            # (CuBLAS workspace config, is deterministic)
            ('garbage', False),
            (None, False),
            (':4096:8', True),
            (':16:8', True)]

        cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
        is_cuda10_2_or_higher = (
            (torch.version.cuda is not None)
            and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))

        def test_case_info(fn_name, config):
            return f'function "{fn_name}" with config "{"" if config is None else config}"'

        # Create processes to test each combination of test cases and config settings
        processes = []
        for fn_name, arg_sizes in test_cases:
            for config, is_config_deterministic in test_configs:
                env = os.environ.copy()
                if config is None:
                    if env.get(cublas_var_name) is not None:
                        del env[cublas_var_name]
                else:
                    env[cublas_var_name] = config
                should_throw_error = is_cuda10_2_or_higher and not is_config_deterministic
                script = f"""
import torch
torch.use_deterministic_algorithms(True)
fn = torch.{fn_name}
arg_sizes = {arg_sizes}
device = '{device}'
should_throw_error = {should_throw_error}
args = []
for arg_size in arg_sizes:
    args.append(torch.randn(*arg_size, device=device))
try:
    fn(*args)
except RuntimeError as e:
    if not should_throw_error:
        raise RuntimeError('Did not expect any error to be raised')
    elif 'Deterministic behavior was enabled with either' not in str(e):
        raise RuntimeError('Expected a CuBLAS nondeterministic error, but got a different error')
else:
    if should_throw_error:
        raise RuntimeError('Expected a CuBLAS nondeterministic error, but it was not raised')

"""
                try:
                    subprocess.check_output(
                        [sys.executable, '-c', script],
                        stderr=subprocess.STDOUT,
                        # On Windows, opening the subprocess with the default CWD makes `import torch`
                        # fail, so just set CWD to this script's directory
                        cwd=os.path.dirname(os.path.realpath(__file__)),
                        env=env)
                except subprocess.CalledProcessError as e:
                    self.fail(msg=(
                        f'Subprocess exception while attempting to run {test_case_info(fn_name, config)}:\n'
                        + e.output.decode("utf-8")))
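    # For reference, a sketch of the determinism recipe the subprocesses above
    # verify (CUDA >= 10.2; config values taken from the test's own table):
    #   os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'   # or ':16:8'
    #   torch.use_deterministic_algorithms(True)
    #   torch.mm(a, b)  # runs without the nondeterministic-behavior error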
    def test_nondeterministic_alert_AvgPool3d(self, device):
        module = torch.nn.AvgPool3d(3)
        input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('avg_pool3d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_AdaptiveAvgPool2d(self, device):
        module = torch.nn.AdaptiveAvgPool2d(3)
        input = torch.randn(2, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('adaptive_avg_pool2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_AdaptiveAvgPool3d(self, device):
        module = torch.nn.AdaptiveAvgPool3d(3)
        input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('adaptive_avg_pool3d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_MaxPool3d(self, device):
        module = torch.nn.MaxPool3d(3)
        input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('max_pool3d_with_indices_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_AdaptiveMaxPool2d(self, device):
        module = torch.nn.AdaptiveMaxPool2d(3)
        input = torch.randn(2, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('adaptive_max_pool2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_FractionalMaxPool2d(self, device):
        module = torch.nn.FractionalMaxPool2d(2, output_ratio=0.5)
        input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('fractional_max_pool2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_FractionalMaxPool3d(self, device):
        module = torch.nn.FractionalMaxPool3d(2, output_ratio=0.5)
        input = torch.randn(2, 3, 3, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('fractional_max_pool3d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_interpolate_linear(self, device):
        input = torch.randn(1, 2, 4, device=device, requires_grad=True)
        res = torch.nn.functional.interpolate(
            input, size=12, mode='linear', align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('upsample_linear1d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_interpolate_bilinear(self, device):
        input = torch.randn(1, 2, 4, 4, device=device, requires_grad=True)
        res = torch.nn.functional.interpolate(
            input, size=12, mode='bilinear', align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('upsample_bilinear2d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_interpolate_bicubic(self, device):
        input = torch.randn(1, 2, 4, 4, device=device, requires_grad=True)
        res = torch.nn.functional.interpolate(
            input, size=12, mode='bicubic', align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('upsample_bicubic2d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_interpolate_trilinear(self, device):
        input = torch.randn(1, 2, 4, 4, 4, device=device, requires_grad=True)
        res = torch.nn.functional.interpolate(
            input, size=12, mode='trilinear', align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('upsample_trilinear3d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReflectionPad1d(self, device):
        module = torch.nn.ReflectionPad1d((1, 2))
        input = torch.randn(2, 3, 8, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('reflection_pad1d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReflectionPad2d(self, device):
        module = torch.nn.ReflectionPad2d((1, 2, 3, 4))
        input = torch.randn(2, 3, 8, 8, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('reflection_pad2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReflectionPad3d(self, device):
        module = torch.nn.ReflectionPad3d((1, 2, 3, 4, 5, 6))
        input = torch.randn(2, 3, 8, 8, 8, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('reflection_pad3d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReplicationPad1d(self, device):
        module = torch.nn.ReplicationPad1d((1, 2))
        input = torch.randn(2, 3, 4, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('replication_pad1d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReplicationPad2d(self, device):
        module = torch.nn.ReplicationPad2d((1, 2, 3, 4))
        input = torch.randn(2, 3, 4, 4, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('replication_pad2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReplicationPad3d(self, device):
        module = torch.nn.ReplicationPad3d((1, 2, 3, 4, 5, 6))
        input = torch.randn(2, 3, 4, 4, 4, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('replication_pad3d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_NLLLoss(self, device):
        module = torch.nn.NLLLoss()
        input = torch.randn(2, 3, 5, 5, device=device)
        target = torch.rand(2, 5, 5, device=device).mul(3).floor().long()

        @expectedAlertNondeterministic('SpatialClassNLLCriterion_updateOutput', 'cuda')
        def forward_func(slf, device):
            module(input, target)

        forward_func(self, device)

    def test_nondeterministic_alert_CTCLoss(self, device):
        module = torch.nn.CTCLoss()
        input = torch.randn(50, 3, 15, device=device, requires_grad=True)
        target = torch.randint(0, 14, (3, 30), device=device)
        input_lengths = [50, 50, 50]
        target_lengths = [30, 25, 20]
        res = module(input, target, input_lengths, target_lengths)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('ctc_loss_backward_gpu', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_EmbeddingBag_max(self, device):
        module = torch.nn.EmbeddingBag(
            4, 3, None, 2., False, 'max',
            _weight=torch.randn(4, 3, device=device, requires_grad=True))
        input = torch.randint(0, 3, (4, 3), device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('embedding_bag_backward_cuda_max', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_scatter_add(self, device):
        def test_func(op_call):
            input = torch.randn(5, 4, device=device)
            dim = 0
            index = torch.tensor([[3]], device=device)
            src = torch.tensor([[1.0]], device=device)

            @expectedAlertNondeterministic('scatter_add_cuda_kernel', 'cuda')
            def forward_func(slf, device):
                op_call(input, dim, index, src)

            forward_func(self, device)

        test_func(torch.Tensor.scatter_add_)
        test_func(torch.Tensor.scatter_add)
        test_func(torch.scatter_add)

    @onlyOnCPUAndCUDA
    def test_nondeterministic_alert_put(self, device):
        def test_func(op_call):
            a = torch.randn(10, device=device)
            indices = torch.tensor([0, 0], device=device)
            values = torch.tensor([0., 1.], device=device)

            @expectedAlertNondeterministic('put_')
            def forward_func(slf, device):
                op_call(a, indices, values, accumulate=False)

            forward_func(self, device)

        test_func(torch.Tensor.put)
        test_func(torch.Tensor.put_)

    def test_nondeterministic_alert_put_accumulate(self, device):
        def test_func(op_call):
            a = torch.randn(10, device=device)
            indices = torch.tensor([0, 0], device=device)
            values = torch.tensor([0., 1.], device=device)

            @expectedAlertNondeterministic('put_', 'cuda')
            def forward_func(slf, device):
                op_call(a, indices, values, accumulate=True)

            forward_func(self, device)

        test_func(torch.Tensor.put)
        test_func(torch.Tensor.put_)

    def test_nondeterministic_alert_histc(self, device):
        def test_func(op_call):
            a = torch.tensor([], device=device)

            @expectedAlertNondeterministic('_histc_cuda', 'cuda')
            def forward_func(slf, device):
                res = op_call(a, min=0, max=3)

            forward_func(self, device)

        test_func(torch.histc)
        test_func(torch.Tensor.histc)

    def test_nondeterministic_alert_bincount(self, device):
        def test_func(op_call):
            a = torch.tensor([], device=device, dtype=torch.long)

            @expectedAlertNondeterministic('_bincount_cuda', 'cuda')
            def forward_func(slf, device):
                res = op_call(a)

            forward_func(self, device)

        test_func(torch.bincount)
        test_func(torch.Tensor.bincount)

    # Ensures that kthvalue throws nondeterministic alerts in the correct cases
    @dtypes(torch.double)
    def test_nondeterministic_alert_kthvalue(self, device, dtype):
        @expectedAlertNondeterministic('kthvalue CUDA', 'cuda')
        def test_func(slf, device, call_type):
            S = 10
            k = 5
            a = torch.randn(S, device=device)
            if call_type == 'function':
                torch.kthvalue(a, k)
            elif call_type == 'method':
                a.kthvalue(k)
            elif call_type == 'out':
                values = torch.empty_like(a)
                indices = torch.empty((), device=device, dtype=torch.long)
                torch.kthvalue(a, k, out=(values, indices))
            else:
                self.fail(f"'{call_type}' is not a valid call type")

        test_func(self, device, 'function')
        test_func(self, device, 'method')
        test_func(self, device, 'out')

    @onlyOnCPUAndCUDA
    def test_nondeterministic_alert_gather(self, device):
        def test_func(op_call):
            a = torch.randn(3, 3, device=device, requires_grad=True)
            dim = 0
            index = torch.tensor([[0]], device=device)
            res = op_call(a, dim, index)
            grad = torch.ones_like(res)

            @expectedAlertNondeterministic('scatter_add_cuda_kernel', 'cuda')
            def backward_func(slf, device):
                res.backward(grad)

            backward_func(self, device)

        test_func(torch.gather)
        test_func(torch.Tensor.gather)

    def test_nondeterministic_alert_grid_sample_2d(self, device):
        input = torch.empty(1, 1, 2, 2, device=device, requires_grad=True)
        grid = torch.empty(1, 1, 1, 2, device=device)
        res = torch.nn.functional.grid_sample(input, grid, align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('grid_sampler_2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_grid_sample_3d(self, device):
        input = torch.empty(1, 1, 2, 2, 2, device=device, requires_grad=True)
        grid = torch.empty(1, 1, 1, 2, 3, device=device)
        res = torch.nn.functional.grid_sample(input, grid, align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('grid_sampler_3d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_embedding_scalar_weight_error(self, device):
        indices = torch.rand(2, 2, device=device).long()
        weights = [
            torch.tensor(1.0, device=device),
            torch.tensor(1.0, device=device).reshape(1, 1, 1),
        ]
        for weight in weights:
            with self.assertRaisesRegex(RuntimeError, "'weight' must be 2-D"):
                torch.embedding(weight, indices)

    def test_dist(self, device):
        def run_test(x, y):
            for p in [0, 1, 2, 3, 4, inf, -inf]:
                dist_xy = torch.dist(x, y, p)
                dist_xy_norm = torch.norm(x - y, p)
                self.assertEqual(dist_xy, dist_xy_norm)

        run_test(torch.randn(5, device=device), torch.randn(5, device=device))

        x = torch.zeros(3, device=device)
        y = torch.zeros(3, device=device)
        y[1] = 1.
        run_test(x, y)

    # Ensures that median throws nondeterministic alerts in the correct cases
    @dtypes(torch.double)
    def test_nondeterministic_alert_median(self, device, dtype):
        def test_func(slf, device, call_type):
            S = 10
            a = torch.randn(S, device=device)
            if call_type == 'function':
                torch.median(a)
            elif call_type == 'function with indices':
                torch.median(a, 0)
            elif call_type == 'method':
                a.median()
            elif call_type == 'method with indices':
                a.median(0)
            elif call_type == 'out with indices':
                result = torch.empty_like(a)
                indices = torch.empty((), dtype=torch.long, device=device)
                torch.median(a, 0, out=(result, indices))
            else:
                self.fail(f"'{call_type}' is not a valid call type")

        @expectedAlertNondeterministic('median CUDA with indices output', 'cuda')
        def test_func_expect_error(slf, device, call_type):
            test_func(slf, device, call_type)

        test_func(self, device, 'function')
        test_func_expect_error(self, device, 'function with indices')
        test_func(self, device, 'method')
        test_func_expect_error(self, device, 'method with indices')
        test_func_expect_error(self, device, 'out with indices')

    def _test_gather_backward_one_dim(self, device, deterministic: bool = False) -> None:
        with DeterministicGuard(deterministic):
            m = random.randint(2000, 3000)
            elems = random.randint(10 * m, 20 * m)
            dim = 0
            src = torch.randn(m, device=device, requires_grad=True)
            idx = torch.randint(m, (elems,), device=device)
            res = torch.gather(src, dim, idx)
            weight = torch.rand_like(res, device=device) * 10 ** 6
            res.backward(weight)
            grad = src.grad.detach().clone()

            if torch.device(device).type == 'cuda':
                for _ in range(2):
                    src.grad.data.zero_()
                    res = torch.gather(src, dim, idx)
                    res.backward(weight)
                    self.assertEqual(src.grad, grad, atol=0, rtol=0)
            else:
                expected = torch.zeros_like(src, device=device)
                for i in range(elems):
                    expected[idx[i]] += weight[i]
                self.assertEqual(grad, expected, atol=0, rtol=0)

    @onlyOnCPUAndCUDA
    def test_gather_backward_deterministic_path(self, device) -> None:
        self._test_gather_backward_one_dim(device, True)

    @onlyCPU
    def test_gather_backward_one_dim(self, device) -> None:
        self._test_gather_backward_one_dim(device, False)

    @onlyOnCPUAndCUDA
    def test_scatter_add_one_dim_deterministic(self, device) -> None:
        with DeterministicGuard(True):
            m = random.randint(20, 30)
            elems = random.randint(2000 * m, 3000 * m)
            dim = 0
            src = torch.randn(elems, device=device)
            idx = torch.randint(m, (elems,), device=device)

            x = torch.zeros(m, device=device)
            res = x.scatter_add(dim, idx, src)

            expected = torch.zeros(m, device=device)
            for i in range(elems):
                expected[idx[i]] += src[i]

            self.assertEqual(res, expected, atol=0, rtol=0)
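    # Illustrative sketch of the guard used above: DeterministicGuard temporarily
    # sets torch.use_deterministic_algorithms and restores the previous value on
    # exit, so inside the block ops like scatter_add take their deterministic path:
    #   with DeterministicGuard(True):
    #       x.scatter_add(0, idx, src)  # bitwise-reproducible result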
self.assertEqual(a.size(), torch.Size([3, 2])) a_with_output = torch.repeat_interleave( y, lengths, dim=0, output_size=output_size, ) self.assertEqual(a_with_output.dtype, y.dtype) self.assertEqual(a_with_output.size(), torch.Size([3, 2])) @dtypes(*(torch.testing.get_all_fp_dtypes(include_half=False, include_bfloat16=False))) @dtypesIfCUDA(*(torch.testing.get_all_fp_dtypes(include_bfloat16=False))) def test_bernoulli_p(self, device, dtype): for trivial_p in ([0, 1], [1, 0, 1, 1, 0, 1]): x = torch.tensor(trivial_p, dtype=dtype, device=device) self.assertEqual(x.bernoulli().tolist(), trivial_p) def isBinary(t): return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum().item() == 0 p = torch.rand(5, 5, dtype=dtype, device=device) self.assertTrue(isBinary(p.bernoulli())) p = torch.rand(5, dtype=dtype, device=device).expand(5, 5) self.assertTrue(isBinary(p.bernoulli())) p = torch.rand(5, 5, dtype=dtype, device=device) torch.bernoulli(torch.rand_like(p), out=p) self.assertTrue(isBinary(p)) # RngUniform not implemented for Integral type in XLA test @dtypes(*(torch.testing.get_all_fp_dtypes(include_half=False, include_bfloat16=False))) @dtypesIfCPU(*(torch.testing.get_all_dtypes(include_half=False, include_bfloat16=False, include_complex=False))) @dtypesIfCUDA(*(torch.testing.get_all_dtypes(include_bfloat16=False, include_complex=False))) def test_bernoulli_self(self, device, dtype): def isBinary(t): return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum().item() == 0 t = torch.empty(10, 10, dtype=dtype, device=device) t.fill_(2) t.bernoulli_(0.5) self.assertTrue(isBinary(t)) for p_dtype in torch.testing.get_all_fp_dtypes(include_half=device.startswith('cuda'), include_bfloat16=False): p = torch.rand(10, dtype=p_dtype, device=device).expand(10, 10) t.fill_(2) t.bernoulli_(p) self.assertTrue(isBinary(t)) t.fill_(2) torch.bernoulli(torch.rand_like(t, dtype=p_dtype), out=t) self.assertTrue(isBinary(t)) t.fill_(2) t.bernoulli_(torch.rand_like(t, dtype=p_dtype)) self.assertTrue(isBinary(t)) @slowTest @dtypes(*(torch.testing.get_all_fp_dtypes(include_half=False, include_bfloat16=False))) @dtypesIfCUDA(*(torch.testing.get_all_fp_dtypes(include_bfloat16=False))) def test_bernoulli_edge_cases(self, device, dtype): # Need to draw a lot of samples to cover every random floating point number. 
    @slowTest
    @dtypes(*(torch.testing.get_all_fp_dtypes(include_half=False, include_bfloat16=False)))
    @dtypesIfCUDA(*(torch.testing.get_all_fp_dtypes(include_bfloat16=False)))
    def test_bernoulli_edge_cases(self, device, dtype):
        # Need to draw a lot of samples to cover every random floating point number.
        a = torch.zeros(10000, 10000, dtype=dtype, device=device)  # probability of drawing "1" is 0
        num_ones = (torch.bernoulli(a) == 1).sum()
        self.assertEqual(num_ones, 0)

        b = torch.ones(10000, 10000, dtype=dtype, device=device)  # probability of drawing "1" is 1
        num_zeros = (torch.bernoulli(b) == 0).sum()
        self.assertEqual(num_zeros, 0)

    @dtypes(*torch.testing.get_all_fp_dtypes())
    def test_exponential(self, device, dtype):
        a = torch.tensor([10], dtype=dtype, device=device).exponential_(0.5)
        self.assertEqual(a.dtype, dtype)
        self.assertEqual(a.size(), torch.Size([1]))

        # Tests extremal behavior
        tests = ((-0, float('inf')), (0, float('inf')), (float('inf'), 0))
        for test in tests:
            t = torch.empty((1,), device=device, dtype=dtype).exponential_(test[0])
            self.assertTrue(t.item() == test[1])

        # Tests that negative lambda fails
        with self.assertRaises(RuntimeError):
            torch.empty((1,), device=device, dtype=dtype).exponential_(-0.5)

    @onlyCUDA
    @dtypesIfCUDA(torch.half, torch.float)
    def test_exponential_no_zero(self, device, dtype):
        # naively, 0 in exponential can be generated with probability 2^-24
        # so we need more samples to check that it's not generated
        # instead of doing just one draw
        # don't test CPU, that would be a long test
        x = torch.empty(50000000, device=device, dtype=dtype).exponential_()
        self.assertTrue(x.min() > 0)

    @skipIfNoSciPy
    @dtypes(*torch.testing.get_all_fp_dtypes())
    def test_uniform_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for from_ in [-42, 0, 4.2]:
            for to_ in [-4.2, 0, 42]:
                if to_ > from_:
                    t = torch.empty(size, dtype=dtype, device=device).uniform_(from_, to_)
                    res = stats.kstest(t.cpu().to(torch.double), 'uniform', args=(from_, (to_ - from_)))
                    self.assertTrue(res.statistic < 0.1)

    @skipIfNoSciPy
    @dtypes(*torch.testing.get_all_fp_dtypes(include_bfloat16=False))
    @dtypesIfCUDA(*torch.testing.get_all_fp_dtypes())
    def test_normal_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for mean in [-10, 0, 50]:
            for std in [1, 5, 10]:
                t = torch.empty(size, dtype=dtype, device=device).normal_(mean=mean, std=std)
                res = stats.kstest(t.cpu().to(torch.double), 'norm', args=(mean, std))
                self.assertTrue(res.statistic < 0.1)

    @skipIfNoSciPy
    @dtypes(*torch.testing.get_all_fp_dtypes())
    def test_lognormal_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for mean in [-3, 0, 7]:
            for std in [1, 5, 7]:
                t = torch.empty(size, dtype=dtype, device=device).log_normal_(mean=mean, std=std)
                res = stats.kstest(t.cpu().to(torch.double), 'lognorm', args=(std, 0, math.exp(mean)))
                if dtype == torch.half:
                    self.assertTrue(res.statistic < 0.3)
                else:
                    self.assertTrue(res.statistic < 0.1)

    @skipIfNoSciPy
    @dtypes(*torch.testing.get_all_fp_dtypes())
    def test_exponential_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for lambd in [0.5, 1.0, 5.0]:
            t = torch.empty(size, dtype=dtype, device=device).exponential_(lambd=lambd)
            res = stats.kstest(t.cpu().to(torch.double), 'expon', args=(0, 1 / lambd,))
            self.assertTrue(res.statistic < 0.1)

    @skipIfNoSciPy
    @dtypes(*torch.testing.get_all_fp_dtypes())
    def test_cauchy_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for median in [-10, 0, 50]:
            for sigma in [0.5, 1.0, 10.0]:
                t = torch.empty(size, dtype=dtype, device=device).cauchy_(median=median, sigma=sigma)
                res = stats.kstest(t.cpu().to(torch.double), 'cauchy', args=(median, sigma))
                self.assertTrue(res.statistic < 0.1)
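    # All of the *_kstest cases above follow the same recipe: draw `size`
    # samples with an in-place RNG method, then ask scipy for the
    # Kolmogorov-Smirnov statistic against the target distribution and
    # require it to stay under a loose threshold. A minimal standalone
    # sketch of that recipe (illustrative only, not part of the suite):
    #
    #     from scipy import stats
    #     t = torch.empty(1000).normal_(mean=0, std=1)
    #     res = stats.kstest(t.double().numpy(), 'norm', args=(0, 1))
    #     assert res.statistic < 0.1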
    @skipIfNoSciPy
    @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))
    def test_geometric_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for p in [0.2, 0.5, 0.8]:
            t = torch.empty(size, dtype=dtype, device=device).geometric_(p=p)
            actual = np.histogram(t.cpu().to(torch.double), np.arange(1, 100))[0]
            expected = stats.geom(p).pmf(np.arange(1, 99)) * size
            res = stats.chisquare(actual, expected)
            self.assertEqual(res.pvalue, 1.0, atol=0.1, rtol=0)

    def test_pairwise_distance_empty(self, device):
        shape = (2, 0)
        x = torch.randn(shape, device=device)
        y = torch.randn(shape, device=device)

        self.assertEqual(torch.zeros(2, device=device), torch.pairwise_distance(x, y))
        self.assertEqual(torch.zeros((2, 1), device=device), torch.pairwise_distance(x, y, keepdim=True))

        shape = (0, 2)
        x = torch.randn(shape, device=device)
        y = torch.randn(shape, device=device)
        self.assertEqual(torch.zeros(0, device=device), torch.pairwise_distance(x, y))
        self.assertEqual(torch.zeros((0, 1), device=device), torch.pairwise_distance(x, y, keepdim=True))

    def test_pdist_empty(self, device):
        shape = (0, 2)
        x = torch.randn(shape, device=device)
        self.assertEqual(torch.empty(0, device=device), torch.pdist(x))

        shape = (1, 2)
        x = torch.randn(shape, device=device)
        self.assertEqual(torch.empty(0, device=device), torch.pdist(x))

        shape = (3, 0)
        x = torch.randn(shape, device=device)
        self.assertEqual(torch.zeros(3, device=device), torch.pdist(x))

    def test_cdist_empty(self, device):
        x = torch.randn((0, 5), device=device)
        y = torch.randn((4, 5), device=device)
        self.assertEqual(torch.empty(0, 4, device=device), torch.cdist(x, y))

        x = torch.randn((2, 5), device=device)
        y = torch.randn((0, 5), device=device)
        self.assertEqual(torch.empty(2, 0, device=device), torch.cdist(x, y))

        x = torch.randn((2, 0), device=device)
        y = torch.randn((3, 0), device=device)
        self.assertEqual(torch.zeros(2, 3, device=device), torch.cdist(x, y))

        x = torch.randn((2, 0), device=device)
        y = torch.randn((0, 0), device=device)
        self.assertEqual(torch.empty(2, 0, device=device), torch.cdist(x, y))

    def _brute_cdist(self, x, y, p=2):
        r1 = x.shape[-2]
        r2 = y.shape[-2]
        if r1 == 0 or r2 == 0:
            return torch.empty(r1, r2, device=x.device)
        return torch.norm(x[..., None, :] - y[..., None, :, :], p=p, dim=-1)

    def test_cdist_norm(self, device):
        for r1 in [3, 4, 5, 6]:
            for m in [2, 3, 4, 10]:
                for r2 in [4, 6, 7, 8]:
                    for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                        x = torch.randn(r1, m, device=device)
                        y = torch.randn(r2, m, device=device)
                        if p == 2:
                            for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
                                actual = torch.cdist(x, y, p=2, compute_mode=cm)
                                expected = self._brute_cdist(x, y, p=2)
                                self.assertEqual(expected, actual, rtol=0, atol=0.02)
                        else:
                            actual = torch.cdist(x, y, p=p)
                            expected = self._brute_cdist(x, y, p=p)
                            self.assertEqual(expected, actual)

    def test_cdist_norm_batch(self, device):
        for r1 in [3, 4, 5, 6]:
            for m in [2, 3, 4, 10]:
                for r2 in [4, 6, 7, 8]:
                    for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                        x = torch.randn(2, 3, 6, r1, m, device=device)
                        y = torch.randn(2, 3, 6, r2, m, device=device)
                        if p == 2:
                            for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
                                actual = torch.cdist(x, y, p=2, compute_mode=cm)
                                expected = self._brute_cdist(x, y, p=2)
                                self.assertEqual(expected, actual, rtol=0, atol=0.02)
                        else:
                            actual = torch.cdist(x, y, p=p)
                            expected = self._brute_cdist(x, y, p=p)
                            self.assertEqual(expected, actual)
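    # A note on the compute_mode values exercised here and below: as we
    # understand it, 'use_mm_for_euclid_dist' computes the p=2 distance via
    # the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x @ y.T, which uses
    # fast matrix multiplication but loses some precision -- hence the looser
    # atol=0.02 when comparing against the direct _brute_cdist reference.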
    @onlyCUDA
    def test_cdist_cuda_backward(self, device):
        for l1 in [1, 511, 513]:
            for l2 in [1, 511, 513]:
                for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                    x1 = torch.randn(4, l1, 32, device=device, requires_grad=True)
                    x2 = x1.clone().detach_().requires_grad_()
                    y1 = torch.randn(4, l2, 32, device=device, requires_grad=True)
                    y2 = y1.clone().detach_().requires_grad_()
                    if p == 2:
                        for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
                            z1 = torch.cdist(x1, y1, p=2, compute_mode=cm).mean()
                            z2 = self._brute_cdist(x2, y2, p=2).mean()
                            z1.backward()
                            z2.backward()
                            self.assertEqual(x1.grad, x2.grad, rtol=0, atol=0.001)
                            self.assertEqual(y1.grad, y2.grad, rtol=0, atol=0.001)
                    else:
                        z1 = torch.cdist(x1, y1, p=p).mean()
                        z2 = self._brute_cdist(x2, y2, p=p).mean()
                        # The gradients must be populated before they can be compared.
                        z1.backward()
                        z2.backward()
                        self.assertEqual(x1.grad, x2.grad, rtol=0, atol=0.001)
                        self.assertEqual(y1.grad, y2.grad, rtol=0, atol=0.001)

    @tf32_on_and_off(0.005)
    def test_cdist_large(self, device):
        for cm in ['use_mm_for_euclid_dist_if_necessary', 'use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
            x = torch.randn(1000, 10, device=device)
            y = torch.randn(1000, 10, device=device)
            actual = torch.cdist(x, y, p=2, compute_mode=cm)
            expected = self._brute_cdist(x, y, p=2)
            self.assertEqual(expected, actual)

    @slowTest
    @tf32_on_and_off(0.01)
    def test_cdist_large_batch(self, device):
        for cm in ['use_mm_for_euclid_dist_if_necessary', 'use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
            x = torch.randn(4, 3, 1000, 10, device=device)
            y = torch.randn(4, 3, 1000, 10, device=device)
            actual = torch.cdist(x, y, p=2, compute_mode=cm)
            expected = self._brute_cdist(x, y, p=2)
            self.assertEqual(expected, actual)

    @tf32_on_and_off(0.005)
    def test_cdist_non_contiguous(self, device):
        for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
            x = torch.randn(5, 7, device=device).transpose(-1, -2)
            y = torch.randn(5, 3, device=device).transpose(-1, -2)
            actual = torch.cdist(x, y, p=2, compute_mode=cm)
            expected = self._brute_cdist(x, y, p=2)
            self.assertFalse(x.is_contiguous())
            self.assertFalse(y.is_contiguous())
            self.assertEqual(expected, actual)

            x = torch.randn(7, 5, device=device)
            y = torch.randn(5, 3, device=device).t()
            actual = torch.cdist(x, y, p=2, compute_mode=cm)
            expected = self._brute_cdist(x, y, p=2)
            self.assertTrue(x.is_contiguous())
            self.assertFalse(y.is_contiguous())
            self.assertEqual(expected, actual)

            x = torch.randn(5, 7, device=device).t()
            y = torch.randn(3, 5, device=device)
            actual = torch.cdist(x, y, p=2, compute_mode=cm)
            expected = self._brute_cdist(x, y, p=2)
            self.assertFalse(x.is_contiguous())
            self.assertTrue(y.is_contiguous())
            self.assertEqual(expected, actual)

    @tf32_on_and_off()
    def test_cdist_non_contiguous_batch(self, device):
        for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
            x = torch.randn(4, 3, 2, 5, 7, device=device).transpose(-1, -2)
            y = torch.randn(4, 3, 2, 5, 3, device=device).transpose(-1, -2)
            actual = torch.cdist(x, y, p=2, compute_mode=cm)
            expected = self._brute_cdist(x, y, p=2)
            self.assertFalse(x.is_contiguous())
            self.assertFalse(y.is_contiguous())
            self.assertEqual(expected, actual)

            x = torch.randn(7, 2, 7, 5, device=device)
            y = torch.randn(7, 2, 5, 3, device=device).transpose(-1, -2)
            actual = torch.cdist(x, y, p=2, compute_mode=cm)
            expected = self._brute_cdist(x, y, p=2)
            self.assertTrue(x.is_contiguous())
            self.assertFalse(y.is_contiguous())
            self.assertEqual(expected, actual)

            x = torch.randn(4, 5, 7, device=device).transpose(-1, -2)
            y = torch.randn(4, 3, 5, device=device)
            actual = torch.cdist(x, y, p=2, compute_mode=cm)
            expected = self._brute_cdist(x, y, p=2)
            self.assertFalse(x.is_contiguous())
            self.assertTrue(y.is_contiguous())
            self.assertEqual(expected, actual)
    def test_multinomial_constraints(self, device):
        x = torch.empty(1, 2, 3, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "prob_dist must be 1 or 2 dim",
            lambda: torch.multinomial(x, 2))
        x = torch.empty(1, 2, dtype=torch.long, device=device)
        self.assertRaisesRegex(
            RuntimeError, "multinomial only supports floating-point dtypes for input",
            lambda: torch.multinomial(x, 2))
        x = torch.empty(1, 2, dtype=torch.double, device=device)
        y = torch.empty(1, 2, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "multinomial expects Long tensor out",
            lambda: torch.multinomial(x, 2, out=y))
        x = torch.empty(2, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "cannot sample n_sample <= 0 samples",
            lambda: torch.multinomial(x, 0))
        x = torch.empty(2, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "cannot sample n_sample <= 0 samples",
            lambda: torch.multinomial(x, -1))
        x = torch.empty(2, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "cannot sample n_sample > prob_dist",
            lambda: torch.multinomial(x, 3, False))
        x = torch.empty(16777217, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "number of categories cannot exceed",
            lambda: torch.multinomial(x, 3))

    def test_cumsum(self, device):
        x = torch.rand(100, 100, device=device)
        res1 = torch.cumsum(x, 1)
        res2 = torch.tensor([]).to(device)
        torch.cumsum(x, 1, out=res2)
        self.assertEqual(res1, res2)
        x.cumsum_(1)
        self.assertEqual(res1, x)

        a = torch.tensor([[True, False, True],
                          [False, False, False],
                          [True, True, True]], device=device)
        b = a.byte()
        aRes = torch.cumsum(a, 0)
        bRes = torch.cumsum(b, 0)
        self.assertEqual(aRes, bRes)
        self.assertEqual(aRes, torch.tensor([[1, 0, 1],
                                             [1, 0, 1],
                                             [2, 1, 2]]))

        aRes = torch.cumsum(a, 1)
        bRes = torch.cumsum(b, 1)
        self.assertEqual(aRes, bRes)
        self.assertEqual(aRes, torch.tensor([[1, 1, 2],
                                             [0, 0, 0],
                                             [1, 2, 3]]))

        # Check that cumulative sum over a zero length dimension doesn't crash on backprop.
        # Also check that cumsum over other dimensions in a tensor with a zero-length
        # dimension also works
        # Also include a basic suite of similar tests for other base cases.
        shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]]
        for shape in shapes:
            for dim in range(len(shape)):
                raw_tensor = torch.zeros(*shape, requires_grad=True)
                integrated = raw_tensor.cumsum(dim=dim)
                # Check that backward does not crash
                integrated.sum().backward()
                # Check that output maintained correct shape
                self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

        # Check a scalar example
        raw_tensor = torch.tensor(3., requires_grad=True)
        integrated = raw_tensor.cumsum(dim=-1)
        self.assertEqual(raw_tensor, integrated)
        # Check that backward does not crash
        integrated.sum().backward()
        # Check that output maintained correct shape
        self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)
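    # For the boolean inputs above, cumsum counts the running number of True
    # values: summing a = [[1, 0, 1], [0, 0, 0], [1, 1, 1]] down dim 0 gives
    # [[1, 0, 1], [1, 0, 1], [2, 1, 2]], the expected tensor asserted above.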
    def test_cumprod(self, device):
        x = torch.rand(100, 100, device=device)
        res1 = torch.cumprod(x, 1)
        res2 = torch.tensor([]).to(device)
        torch.cumprod(x, 1, out=res2)
        self.assertEqual(res1, res2)
        x.cumprod_(1)
        self.assertEqual(res1, x)

        a = torch.tensor([[True, False, True],
                          [False, False, False],
                          [True, True, True]], dtype=torch.bool, device=device)
        b = a.byte()
        aRes = torch.cumprod(a, 0)
        bRes = torch.cumprod(b, 0)
        self.assertEqual(aRes, bRes)
        self.assertEqual(aRes, torch.tensor([[1, 0, 1],
                                             [0, 0, 0],
                                             [0, 0, 0]]))

        aRes = torch.cumprod(a, 1)
        bRes = torch.cumprod(b, 1)
        self.assertEqual(aRes, bRes)
        self.assertEqual(aRes, torch.tensor([[1, 0, 0],
                                             [0, 0, 0],
                                             [1, 1, 1]]))

        # Check that cumulative prod over a zero length dimension doesn't crash on backprop.
        # Also check that cumprod over other dimensions in a tensor with a zero-length
        # dimension also works
        # Also include a basic suite of similar tests for other base cases.
        shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]]
        for shape in shapes:
            for dim in range(len(shape)):
                raw_tensor = torch.zeros(*shape, requires_grad=True)
                integrated = raw_tensor.cumprod(dim=dim)
                # Check that backward does not crash
                integrated.sum().backward()
                # Check that output maintained correct shape
                self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

        # Check a scalar example
        raw_tensor = torch.tensor(3., requires_grad=True)
        integrated = raw_tensor.cumprod(dim=-1)
        self.assertEqual(raw_tensor, integrated)
        # Check that backward does not crash
        integrated.sum().backward()
        # Check that output maintained correct shape
        self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

    def test_cummax_cummin(self, device):
        def test_ops(op, string_of_function_name, expected_output1, expected_output2):
            x = torch.rand(100, 100, device=device)
            out1 = op(x, 1)
            res2 = torch.empty(0, device=device)
            indices2 = torch.empty(0, dtype=torch.int64, device=device)
            op(x, 1, out=(res2, indices2))
            self.assertEqual(out1[0], res2)
            self.assertEqual(out1[1], indices2)

            a = torch.tensor([[True, False, True],
                              [False, False, False],
                              [True, True, True]], dtype=torch.bool, device=device)
            b = a.byte()
            aRes = op(a, 0)
            bRes = op(b, 0)
            self.assertEqual(aRes[0], bRes[0].bool())
            self.assertEqual(aRes[0], expected_output1.bool())

            # test inf and nan input
            x = torch.tensor([4, inf, 1.5, -inf, 0, nan, 1])
            xRes = op(x, 0)[0]
            self.assertEqual(xRes, expected_output2)

            # op shouldn't support values, indices with a dtype, device type or layout
            # different from that of input tensor
            t = torch.randn(10)
            values = torch.empty(0, dtype=torch.int16)
            indices = torch.empty(0, dtype=torch.int64)
            with self.assertRaisesRegex(
                    RuntimeError,
                    'expected scalar_type Float but found Short'):
                op(t, 0, out=(values, indices))

            # Check that op over a zero length dimension doesn't crash on backprop.
            # Also check that op over other dimensions in a tensor with a zero-length
            # dimension also works
            # Also include a basic suite of similar tests for other base cases.
            shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]]
            for shape in shapes:
                for dim in range(len(shape)):
                    raw_tensor = torch.zeros(*shape, requires_grad=True)
                    integrated = getattr(raw_tensor, string_of_function_name)(dim=dim)
                    # Check that backward does not crash
                    integrated[0].sum().backward()
                    # Check that output maintained correct shape
                    self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

            # Check a scalar example
            raw_tensor = torch.tensor(3., requires_grad=True)
            integrated = getattr(raw_tensor, string_of_function_name)(dim=-1)
            # Check that backward does not crash
            integrated[0].sum().backward()
            # Check that output maintained correct shape
            self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

        expected_out = torch.tensor([4, inf, inf, inf, inf, nan, nan])
        test_ops(torch.cummax, "cummax", torch.tensor([[1, 0, 1],
                                                       [1, 0, 1],
                                                       [1, 1, 1]]), expected_out)

        expected_out = torch.tensor([4, 4, 1.5, -inf, -inf, nan, nan])
        test_ops(torch.cummin, "cummin", torch.tensor([[1, 0, 1],
                                                       [0, 0, 0],
                                                       [0, 0, 0]]), expected_out)
    def test_logcumsumexp(self, device):
        def logcumsumexp(a, axis):
            return torch.cumsum(a.exp(), axis=axis).log_()

        axis = -1
        a = torch.randn(100, 100, device=device)

        actual = a.logcumsumexp(axis)
        expected = logcumsumexp(a, axis)
        self.assertEqual(a.dtype, actual.dtype)
        self.assertEqual(expected.shape, actual.shape)
        self.assertEqual(expected, actual)

        # check -inf and nan handling
        x = torch.tensor([-float('inf'), -float('inf'), 1.0, 1.0, float('inf'),
                          float('inf'), float('nan'), 1.0, 1.0], device=device)
        x2d = x.unsqueeze(0).expand(2, -1)

        for inp in (x, x2d):
            actual = inp.logcumsumexp(axis)
            expected = logcumsumexp(inp, axis)
            self.assertEqual(expected, actual)

        # Check that out is actually inplace
        b = torch.randn(5, 2, device=device)
        inplace_out = torch.zeros(5, 2, device=device)

        expected = logcumsumexp(b, axis)
        torch.logcumsumexp(b, axis=axis, out=inplace_out)

        self.assertEqual(inplace_out, expected)

        # Check input and inplace_output type mismatch
        b = torch.randn(5, 2, device=device, dtype=torch.float64)
        inplace_out = torch.zeros(5, 2, device=device, dtype=torch.float32)
        with self.assertRaisesRegex(
                RuntimeError,
                'expected scalar_type Double but found Float'):
            torch.logcumsumexp(b, axis, out=inplace_out)

    def _test_diff_numpy(self, t, dims=None):
        # Helper for test_diff to compare with NumPy reference implementation
        def to_np(t):
            if t.dtype == torch.bfloat16:
                return t.to(dtype=torch.float, device="cpu").numpy()
            else:
                return t.cpu().numpy()

        for dim in dims if dims else range(t.dim()):
            prepend = t.narrow(dim, 0, 1)
            append = t.narrow(dim, 0, 1)
            np_t = to_np(t)

            # test when prepend and append's size along dim is 1
            actual = torch.diff(t, dim=dim, prepend=prepend, append=append)
            expected = torch.from_numpy(np.diff(np_t, axis=dim, prepend=to_np(prepend), append=to_np(append)))
            self.assertEqual(actual, expected.to(t.dtype))

            # test when prepend and append's size along dim != 1
            actual = torch.diff(t, dim=dim, prepend=t, append=t)
            expected = torch.from_numpy(np.diff(np_t, axis=dim, prepend=np_t, append=np_t))
            self.assertEqual(actual, expected.to(t.dtype))

    # All tensors appear contiguous on XLA
    @onlyOnCPUAndCUDA
    @dtypes(*torch.testing.get_all_dtypes())
    def test_diff_noncontig(self, device, dtype):
        shapes = (
            (1,),
            (1, 5),
            (3, 5),
            (1, 5, 1),
            (2, 3, 5))

        for shape in shapes:
            contig = make_tensor(shape, device, dtype, low=-9, high=9)

            non_contig = torch.empty(shape + (2, 2), device=device, dtype=dtype)[..., 0]
            non_contig = non_contig.select(-1, -1)
            non_contig.copy_(contig)
            self.assertTrue(not non_contig.is_contiguous() or shape == (1,))

            self._test_diff_numpy(non_contig)
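    # torch.diff semantics, for reference (illustrative): with n=1,
    # diff(t)[..., i] = t[..., i + 1] - t[..., i] along the given dim, and
    # prepend/append are concatenated onto t before differencing, e.g.
    #     torch.diff(torch.tensor([1, 3, 6]))  # -> tensor([2, 3])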
    # RngNormal not implemented for type f16 for XLA
    @dtypes(*torch.testing.get_all_dtypes(include_half=False))
    @dtypesIfCPU(*torch.testing.get_all_dtypes())
    @dtypesIfCUDA(*torch.testing.get_all_dtypes())
    def test_diff(self, device, dtype):
        shapes = (
            (1,),
            (1, 5),
            (3, 5),
            (1, 5, 1),
            (2, 3, 5))

        for shape in shapes:
            contig = make_tensor(shape, device, dtype, low=-9, high=9)
            self._test_diff_numpy(contig)

        t = torch.ones(2, 3)

        with self.assertRaisesRegex(
                RuntimeError, 'diff expects prepend or append to be the same dimension as input'):
            invalid_prepend = torch.tensor([1, 2, 3], device=device, dtype=dtype)
            t.diff(dim=0, prepend=invalid_prepend)

        with self.assertRaisesRegex(
                RuntimeError, 'diff expects the shape of tensor to prepend or append to match that of input'):
            invalid_prepend = torch.tensor([[0, 1]], device=device, dtype=dtype)
            t.diff(dim=0, prepend=invalid_prepend)

        with self.assertRaisesRegex(
                RuntimeError, 'diff only supports n = 1 currently'):
            torch.diff(t, n=2)

        with self.assertRaisesRegex(
                RuntimeError, 'diff expects input to be at least one-dimensional'):
            scalar = torch.tensor(2, device=device, dtype=dtype)
            torch.diff(scalar)

    # if the given input arg is not a list, it returns a list of single element: [arg]
    def _wrap_to_list(self, input_array):
        return input_array if isinstance(input_array, list) else [input_array]

    # Ensures that inf, -inf, and nan values do not cause divergence between NumPy and PyTorch.
    # There are two types of possible divergence:
    # 1. When we compute a/b where a and b are both real numbers with very small absolute
    #    values (i.e. very near 0.0), the result can be inf, -inf, or nan, which diverges.
    # 2. When we divide complex numbers by zero. For example, when a = torch.tensor(3+5j), we have
    #    a/0 equal to nan + nan*j in PyTorch and inf + inf*j in NumPy.
    def _inf_nan_preprocess(self, actual, expected):
        for i in range(len(expected)):
            expected[i] = np.nan_to_num(expected[i], nan=nan, posinf=nan, neginf=nan)
            # nan_to_num is not defined for complex tensors in PyTorch.
            if actual[i].dtype == torch.complex64:
                actual[i].real = torch.nan_to_num(actual[i].real, nan=nan, posinf=nan, neginf=nan)
                actual[i].imag = torch.nan_to_num(actual[i].imag, nan=nan, posinf=nan, neginf=nan)
            else:
                actual[i] = torch.nan_to_num(actual[i], nan=nan, posinf=nan, neginf=nan)

        return actual, expected
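    # Illustration of divergence case 2 above (informal, values as described
    # in the comment; not executed as part of the suite):
    #     torch.tensor(3 + 5j) / torch.tensor(0.)  # -> nan+nan*j in PyTorch
    #     np.array(3 + 5j) / np.array(0.)          # -> inf+inf*j in NumPy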
    @onlyOnCPUAndCUDA
    @dtypes(torch.long, torch.float32, torch.complex64)
    def test_gradient_all(self, device, dtype):
        def create_scalar(shape):
            return make_tensor((1,), device='cpu', dtype=dtype, low=1.).item()

        def create_list(shape):
            return make_tensor((len(shape),), device='cpu', dtype=dtype, low=1.).tolist()

        def create_coordinate_tensors(shape):
            tensor_list = []
            for i in range(len(shape)):
                tensor_list.append(make_tensor((shape[i],), device=device, dtype=dtype))
            return tensor_list

        def filter_shape(shape, dim):
            filtered_shape = []
            for i in range(len(dim)):
                filtered_shape.append(shape[dim[i]])
            return filtered_shape

        # shape, dims format
        test_cases = (
            ((5,), (0,)),
            ((4, 4), (0, 1)),
            ((3, 3, 3), (-1, 0)),
            ((4, 4, 4), (2,)),
            ((4, 4, 4), (0, 1)),
            ((4, 4, 4, 3), (0, 2, 3)),
            ((4, 5, 3, 4, 3), (1, 2)),
            ((4, 3, 6, 5, 3), (2, 4)),
            ((4, 3, 3, 5, 3), (0, 1, 2, 3, 4)),
        )

        for case, contig, edge_order, space_fn in product(test_cases, [True, False],
                                                          [1, 2], (create_scalar, create_list,
                                                                   create_coordinate_tensors)):
            shape, dims = case
            # filter shape by dims before passing filtered shape to create_* functions
            filtered_shape = filter_shape(shape, dims)

            spacing = space_fn(filtered_shape)
            t = make_tensor(shape, device=device, dtype=dtype, noncontiguous=not contig)
            t_np = t.cpu().numpy()

            actual = torch.gradient(t, spacing=spacing, dim=dims, edge_order=edge_order)
            if space_fn == create_coordinate_tensors and spacing[0].device != 'cpu':
                spacing = [space.cpu().detach().numpy() for space in spacing]
            expected = np.gradient(t_np, *self._wrap_to_list(spacing), axis=dims, edge_order=edge_order)
            actual, expected = self._inf_nan_preprocess(list(actual), self._wrap_to_list(expected))
            self.assertEqual(actual, expected, equal_nan="relaxed", atol=1e-4, rtol=0, exact_dtype=False)

    @onlyOnCPUAndCUDA
    @dtypes(torch.long, torch.float32, torch.complex64)
    def test_gradient_extreme_cases(self, device, dtype):
        # Test behaviour for inf and nan values
        actual = torch.gradient(torch.tensor([2, -2, inf, inf, -inf, -inf, inf, 3, -inf, 2, nan, nan, 3, inf, nan]))
        expected = np.gradient(np.array([2, -2, inf, inf, -inf, -inf, inf, 3, -inf, 2, nan, nan, 3, inf, nan]))
        self.assertEqual(actual, self._wrap_to_list(expected), exact_dtype=False)

        # Test behaviour in very big tensors
        large_size = 100000
        t = make_tensor((large_size,), device, dtype)
        t_np = t.cpu().numpy()
        coordinates_np = list(np.random.randn(large_size))
        coordinates = [torch.tensor(coordinates_np, device=device)]
        actual = torch.gradient(t, spacing=coordinates, dim=0, edge_order=1)
        expected = [np.gradient(t_np, coordinates_np, axis=0, edge_order=1)]
        self.assertEqual(actual, expected, exact_dtype=False)

        actual = torch.gradient(t, spacing=coordinates, dim=0, edge_order=2)
        expected = [np.gradient(t_np, coordinates_np, axis=0, edge_order=2)]
        self.assertEqual(actual, expected, exact_dtype=False)
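    # Background for the gradient tests (as we understand np.gradient, which
    # torch.gradient mirrors): interior points use second-order central
    # differences, e.g. (f[i + 1] - f[i - 1]) / (2 * h) for uniform spacing h,
    # while edge_order selects first- or second-order accurate one-sided
    # estimates at the boundary points.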
    @onlyOnCPUAndCUDA
    def test_gradient_type_promotion(self, device):
        inputs = (
            make_tensor((4, 4), device=device, dtype=torch.float32),
            make_tensor((4, 4), device=device, dtype=torch.complex64),
            make_tensor((4, 4), device=device, dtype=torch.int64),
        )

        spacing = (
            make_tensor((1,), device='cpu', dtype=torch.float32).item(),
            make_tensor((1,), device='cpu', dtype=torch.int64).item(),
            make_tensor((1,), device='cpu', dtype=torch.complex64).item(),
            make_tensor((2,), device='cpu', dtype=torch.float32, low=0.1).tolist(),
            make_tensor((2,), device='cpu', dtype=torch.int64, low=1).tolist(),
            make_tensor((2,), device='cpu', dtype=torch.complex64).tolist(),
            [make_tensor((4,), device=device, dtype=torch.float32),
             make_tensor((4,), device=device, dtype=torch.float32)],
            [make_tensor((4,), device=device, dtype=torch.int64),
             make_tensor((4,), device=device, dtype=torch.int64)],
            [make_tensor((4,), device=device, dtype=torch.complex64),
             make_tensor((4,), device=device, dtype=torch.complex64)],
        )

        for input, spacing_or_coord, edge_order in product(inputs, spacing, [1, 2]):
            input_np = input.cpu().numpy()
            actual = torch.gradient(input, spacing=spacing_or_coord, dim=(0, 1), edge_order=edge_order)
            spacing_or_coord_wrapped = self._wrap_to_list(spacing_or_coord)
            spacing_or_coord_np = []
            if torch.is_tensor(spacing_or_coord_wrapped[0]) and torch.device(spacing_or_coord_wrapped[0].device).type != 'cpu':
                for i in range(len(spacing_or_coord_wrapped)):
                    spacing_or_coord_np.append(spacing_or_coord_wrapped[i].detach().clone().cpu().numpy())
            else:
                spacing_or_coord_np = spacing_or_coord_wrapped
            expected = np.gradient(input_np, *spacing_or_coord_np, axis=(0, 1), edge_order=edge_order)
            if actual[0].dtype == torch.complex64 and input.dtype != torch.complex64:
                for i in range(len(actual)):
                    self.assertEqual(actual[i].real, expected[i].real, exact_dtype=False)
                    # Type promotion fails on NumPy when spacing is given as a complex number
                    # and input is given as a real number. The result is returned as a real
                    # number, so all the imaginary parts are expected to be zero.
                    self.assertEqual(expected[i].imag, torch.zeros(actual[i].shape), exact_dtype=False)
            else:
                actual, expected = self._inf_nan_preprocess(list(actual), expected)
                self.assertEqual(actual, expected, equal_nan="relaxed", exact_dtype=False)

    @onlyOnCPUAndCUDA
    @dtypes(torch.long, torch.float32, torch.complex64)
    def test_error_gradient(self, device, dtype):
        t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, 'torch.gradient expected spacing to be unspecified, a scalar '):
            dim = (1, 0)
            spacing = [0.1]
            torch.gradient(t, spacing=spacing, dim=dim, edge_order=1)

        with self.assertRaisesRegex(RuntimeError, 'torch.gradient only supports edge_order=1 and edge_order=2.'):
            torch.gradient(t, edge_order=3)

        with self.assertRaisesRegex(RuntimeError, 'dim 1 appears multiple times in the list of dims'):
            dim = (1, 1)
            spacing = 0.1
            torch.gradient(t, spacing=spacing, dim=dim, edge_order=1)

        with self.assertRaisesRegex(RuntimeError, 'torch.gradient expected each tensor to be on the same device,'):
            dim = (0, 1)
            coordinates = [torch.tensor([1, 2, 4], device='cpu'), torch.tensor([1, 2, 4], device='meta')]
            torch.gradient(t, spacing=coordinates, dim=dim, edge_order=1)

        with self.assertRaises(IndexError):
            torch.gradient(t, dim=3)

        with self.assertRaisesRegex(RuntimeError, 'torch.gradient expected each dimension size to be at least'):
            torch.gradient(torch.tensor([[1], [2], [3]]), edge_order=1)

        with self.assertRaisesRegex(RuntimeError, 'torch.gradient expected each dimension size to be at least'):
            torch.gradient(torch.tensor([[1, 2], [3, 4]]), edge_order=2)

    def _test_large_cum_fn_helper(self, x, fn):
        x_cpu = x.cpu().float()
        expected = fn(x_cpu)
        actual = fn(x).cpu().float()
        self.assertEqual(expected, actual.cpu().float())

    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "sandcastle OOM with current tpx gpu/re configuration")
    @onlyCUDA
    @dtypesIfCUDA(torch.half)  # only small dtype not to get oom
    def test_large_cumsum(self, device, dtype):
        # initialization to avoid overflow and half caveats
        x = torch.empty(2**30 + 200, device=device, dtype=dtype)
        x[::3] = -3
        x[1::3] = 2
        x[2::3] = 1
        self._test_large_cum_fn_helper(x, lambda x: torch.cumsum(x, 0))

    @onlyCUDA
    @dtypesIfCUDA(torch.half)  # only small dtype not to get oom
    def test_large_cumprod(self, device, dtype):
        # initialization to avoid overflow and half caveats
        x = torch.empty(2**30 + 200, device=device, dtype=dtype)
        x[::3] = 8
        x[1::3] = .25
        x[2::3] = .5
        self._test_large_cum_fn_helper(x, lambda x: torch.cumprod(x, 0))

    def test_discontiguous_out_cumsum(self, device):
        x = torch.randn(4, 8, device=device)
        y = torch.empty(4, 16, device=device)[:, ::2]
        out = torch.cumsum(x, 0)
        torch.cumsum(x, 0, out=y)
        self.assertFalse(y.is_contiguous())
        self.assertEqual(out, y, atol=0., rtol=0.)

    def _test_cumminmax_helper(self, x, fn, expected_val, expected_ind):
        val, ind = fn(x, -1)
        self.assertEqual(val, expected_val, atol=0, rtol=0)
        self.assertEqual(ind, expected_ind, atol=0, rtol=0)
        out_val = torch.empty_like(val).t().contiguous().t()
        out_ind = torch.empty_like(ind).t().contiguous().t()
        fn(x, -1, out=(out_val, out_ind))
        self.assertFalse(out_val.is_contiguous())
        self.assertFalse(out_ind.is_contiguous())
        self.assertEqual(out_val, expected_val, atol=0, rtol=0)
        self.assertEqual(out_ind, expected_ind, atol=0, rtol=0)

    def test_cummax_discontiguous(self, device):
        x = torch.tensor([[0, 1, 2, 3, 2, 1], [4, 5, 6, 5, 6, 7]], device=device, dtype=torch.float).t().contiguous().t()
        expected_val = torch.tensor([[0, 1, 2, 3, 3, 3], [4, 5, 6, 6, 6, 7]], device=device, dtype=torch.float)
        expected_ind = torch.tensor([[0, 1, 2, 3, 3, 3], [0, 1, 2, 2, 4, 5]], device=device, dtype=torch.long)
        self._test_cumminmax_helper(x, torch.cummax, expected_val, expected_ind)

    def test_cummin_discontiguous(self, device):
        x = torch.tensor([[3, 2, 1, 0, 1, 2], [7, 6, 5, 4, 5, 2]], device=device, dtype=torch.float).t().contiguous().t()
        expected_val = torch.tensor([[3, 2, 1, 0, 0, 0], [7, 6, 5, 4, 4, 2]], device=device, dtype=torch.float)
        expected_ind = torch.tensor([[0, 1, 2, 3, 3, 3], [0, 1, 2, 3, 3, 5]], device=device, dtype=torch.long)
        self._test_cumminmax_helper(x, torch.cummin, expected_val, expected_ind)

    def test_bool_tensor_value_change(self, device):
        x = torch.tensor([True, False], dtype=torch.bool, device=device)
        x[0] = False
        x[1] = True
        self.assertEqual(x, torch.tensor([False, True], dtype=torch.bool, device=device))

    def test_unfold_all_devices_and_dtypes(self, device):
        for dt in torch.testing.get_all_dtypes():
            x = torch.empty((0, 1, 3, 0), dtype=dt, device=device)
            self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape)

    def test_unfold_scalars(self, device):
        x = torch.tensor(0.5, device=device)
        # unfold on a 0-dimensional tensor should always return a 1-dimensional
        # tensor of shape [size] (i.e., the second parameter to unfold)

        self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 1))
        self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 2))
        self.assertEqual(torch.tensor([0.5], device=device), x.unfold(0, 1, 1))

    def test_copy_all_dtypes_and_devices(self, device):
        from copy import copy
        for dt in torch.testing.get_all_dtypes():
            x = torch.tensor([1, 2, 3, 4], dtype=dt, device=device)
            x_clone = x.clone()
            y = copy(x)
            y.fill_(1)
            # copy is a shallow copy, only copies the tensor view,
            # not the data
            self.assertEqual(x, y)
    def test_clone_all_dtypes_and_devices(self, device):
        for dt in torch.testing.get_all_dtypes():
            x = torch.tensor((1, 1), dtype=dt, device=device)
            y = x.clone()
            self.assertEqual(x, y)

    def test_clone_zero_stride_dim(self, device):
        # stride zero, size 1 axis, not contiguous
        x = torch.randn(10)
        y = x.as_strided([2, 1, 5], [1, 0, 2])
        self.assertEqual(y, y.clone())

    @dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')))
    @dtypes(*set(torch.testing.get_all_math_dtypes('cpu')))
    def test_addcmul(self, device, dtype):
        # Returns floating or integral scalar corresponding to dtype
        def _number(floating, integer, dtype):
            if dtype in [torch.half, torch.float, torch.double, torch.bfloat16]:
                return floating
            elif dtype in [torch.cfloat, torch.cdouble]:
                return floating * (1 + 1j)
            else:
                return integer

        def rand_tensor(size, dtype, device):
            if dtype.is_floating_point or dtype.is_complex:
                return torch.rand(size=size, dtype=dtype, device=device)
            if dtype == torch.uint8:
                return torch.randint(1, 5, size=size, dtype=dtype, device=device)
            else:
                return torch.randint(-5, 5, size=size, dtype=dtype, device=device)

        a = rand_tensor((2, 2), dtype=dtype, device=device)
        b = rand_tensor((2, 2), dtype=dtype, device=device)
        c = rand_tensor((2, 2), dtype=dtype, device=device)

        alpha = _number(0.5, 3, dtype)

        actual = torch.addcmul(a, b, c, value=alpha)
        expected = a + alpha * b * c

        self.assertEqual(expected, actual)

        with self.assertWarnsOnceRegex(
                UserWarning, "This overload of addcmul is deprecated"):
            self.assertEqual(actual, torch.addcmul(a, alpha, b, c))

        if self.device_type == 'cuda' and dtype == torch.half:
            a = torch.tensor([60000.0], device=device, dtype=dtype)
            b = torch.tensor([60000.0], device=device, dtype=dtype)
            c = torch.tensor([2.0], device=device, dtype=dtype)
            out = torch.addcmul(a, b, c, value=-1)
            self.assertTrue(not (out.isnan() or out.isinf()))

    def test_narrow_empty(self, device):
        x = torch.randn(2, 3, 4, device=device)
        for d in range(x.dim()):
            y = x.narrow(d, x.size(d), 0)
            sz = list(x.size())
            sz[d] = 0
            self.assertEqual(sz, y.size())

    @dtypes(*torch.testing.get_all_dtypes())
    def test_index_copy(self, device, dtype):
        # We just test for num_copy <= num_dest, as otherwise there are repeated indices
        # and the behavior is undefined
        num_copy, num_dest = 3, 5

        def make_arg(batch_sizes, n, dim, contig):
            size_arg = batch_sizes[:dim] + (n,) + batch_sizes[dim:]
            return make_tensor(size_arg, device, dtype, low=None, high=None, noncontiguous=not contig)

        def ref_index_copy(tgt, dim, idx, src):
            for i in range(idx.size(0)):
                idx_dest = dim * (slice(None),) + (idx[i],)
                idx_src = dim * (slice(None),) + (i,)
                tgt[idx_dest] = src[idx_src]

        # More thorough testing as in index_add
        for dest_contig, src_contig, index_contig in product([True, False], repeat=3):
            for other_sizes in ((), (4, 5)):
                for dim in range(len(other_sizes)):
                    dest = make_arg(other_sizes, num_dest, dim, dest_contig)
                    src = make_arg(other_sizes, num_copy, dim, src_contig)
                    idx = torch.randperm(num_dest, dtype=torch.int64, device=device)[:num_copy]
                    if not index_contig:
                        idx = torch.repeat_interleave(idx, 2, dim=-1)
                        idx = idx[..., ::2]
                    dest2 = dest.clone()
                    dest.index_copy_(dim, idx, src)
                    ref_index_copy(dest2, dim, idx, src)
                    self.assertEqual(dest, dest2)
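    # index_copy_ semantics on a concrete example (illustrative):
    #     dest = torch.zeros(5)
    #     dest.index_copy_(0, torch.tensor([4, 0, 2]), torch.tensor([1., 2., 3.]))
    #     # dest is now [2., 0., 3., 0., 1.]  (dest[idx[i]] = src[i])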
    # onlyOnCPUAndCUDA due to an XLA error:
    # https://github.com/pytorch/pytorch/issues/53256
    @onlyOnCPUAndCUDA
    @dtypes(*torch.testing.get_all_dtypes())
    def test_index_copy_scalars(self, device, dtype):
        # Create the 8 possible combinations of scalar sizes for target / index / source
        scalars = ((make_tensor(size_t, dtype=dtype, device=device, low=None, high=None),
                    make_tensor(size_i, dtype=torch.int64, device=device, low=0, high=1),
                    make_tensor(size_s, dtype=dtype, device=device, low=None, high=None))
                   for size_t, size_i, size_s in product([(), (1,)], repeat=3))
        for target, idx, source in scalars:
            target.index_copy_(0, idx, source)
            self.assertEqual(target.item(), source.item())

    @onlyCPU
    def test_errors_index_copy(self, device):
        # We do not test the GPU as the CUDA_ASSERT would break the CUDA context
        idx_dim = 8
        tgt_dim = 5
        batch_dim = 3

        # Too large of an index
        a = torch.randn(batch_dim, tgt_dim, device=device)
        idx = torch.full((idx_dim,), tgt_dim, device=device)
        c = torch.zeros(batch_dim, idx_dim, device=device)
        with self.assertRaises(IndexError):
            a.index_copy_(1, idx, c)

        # Too small (negative indices)
        idx = torch.full((idx_dim,), -1, device=device)
        with self.assertRaises(IndexError):
            a.index_copy_(1, idx, c)

        # Too small (very negative indices) - they should be unsupported even
        # when support for negative indices is implemented for index_copy_
        idx = torch.full((idx_dim,), -tgt_dim - 1, device=device)
        with self.assertRaises(IndexError):
            a.index_copy_(1, idx, c)

    def _prepare_data_for_index_copy_and_add_deterministic(
            self, dim: int, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        assert (dim >= 0 and dim < 3)
        a = [5, 4, 3]
        a[dim] = 2000
        x = torch.zeros(a, device=device)
        b = a.copy()
        elems = a[dim] * 20
        b[dim] = elems
        src = torch.rand(b, device=device)
        index = torch.randint(a[dim], (elems,), device=device)
        return (x, index, src)

    @onlyOnCPUAndCUDA
    def test_index_copy_deterministic(self, device: torch.device) -> None:
        for dim in range(3):
            x, index, src = self._prepare_data_for_index_copy_and_add_deterministic(dim, device)
            with DeterministicGuard(True):
                y0 = torch.index_copy(x, dim, index, src)

            x0 = x.clone().detach()
            index_list = index.tolist()
            for i in range(len(index_list)):
                if dim == 0:
                    x0[index_list[i], :, :] = src[i, :, :]
                elif dim == 1:
                    x0[:, index_list[i], :] = src[:, i, :]
                elif dim == 2:
                    x0[:, :, index_list[i]] = src[:, :, i]

            self.assertEqual(x0, y0, atol=0, rtol=0)

    @onlyOnCPUAndCUDA
    def test_index_add_deterministic(self, device: torch.device) -> None:
        for dim in range(3):
            x, index, src = self._prepare_data_for_index_copy_and_add_deterministic(dim, device)
            alpha = random.random() + 1
            # on CPU it should be deterministic regardless of the deterministic mode
            with DeterministicGuard(True):
                y0 = torch.index_add(x, dim, index, src, alpha=alpha)
                for _ in range(3):
                    y = torch.index_add(x, dim, index, src, alpha=alpha)
                    self.assertEqual(y, y0, atol=0, rtol=0)

            with DeterministicGuard(False):
                for _ in range(3):
                    y_nd = torch.index_add(x, dim, index, src, alpha=alpha)
                    self.assertEqual(y_nd, y0, atol=1e-3, rtol=1e-5)

    @onlyOnCPUAndCUDA
    def test_index_put_non_accumulate_deterministic(self, device) -> None:
        with DeterministicGuard(True):
            for i in range(3):
                m = random.randint(10, 20)
                elems = random.randint(20000, 30000)
                values = torch.rand(elems, device=device)
                indices = torch.randint(m, (elems,), device=device)
                input = torch.rand(m, device=device)
                output = input.index_put((indices,), values, accumulate=False)

                input_list = input.tolist()
                indices_list = indices.tolist()
                values_list = values.tolist()
                for i, v in zip(indices_list, values_list):
                    input_list[i] = v

                self.assertEqual(output, input_list)
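    # Why the nondeterministic branch above needs tolerances: on CUDA,
    # index_add is implemented with atomic adds, and floating-point addition
    # is not associative, so the accumulation order -- and therefore the
    # low-order bits of the result -- can differ from run to run.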
    @dtypes(*torch.testing.get_all_dtypes())
    def test_index_fill(self, device, dtype):
        x = torch.tensor([[1, 2], [4, 5]], dtype=dtype, device=device)
        index = torch.tensor([0], device=device)
        x.index_fill_(1, index, 0)
        self.assertEqual(x, torch.tensor([[0, 2], [0, 5]], dtype=dtype, device=device))
        if not x.is_complex():
            with self.assertRaisesRegex(RuntimeError, r"Scalar"):
                x.index_fill_(1, index, 1 + 1j)
        # Make sure that the result stays 0-dim while applied to
        # a 0-dim input
        x = torch.tensor(1, dtype=dtype, device=device)
        self.assertEqual(0, x.index_fill(0, index, -1).dim())
        self.assertEqual(0, x.index_fill_(0, index, -1).dim())

    # The test fails for zero-dimensional tensors on XLA
    @onlyOnCPUAndCUDA
    @dtypes(*torch.testing.get_all_dtypes())
    def test_index_select(self, device, dtype):
        num_src, num_out = 3, 5

        def make_arg(batch_sizes, n, dim, contig):
            size_arg = batch_sizes[:dim] + (n,) + batch_sizes[dim:]
            return make_tensor(size_arg, device, dtype, low=None, high=None, noncontiguous=not contig)

        def ref_index_select(src, dim, idx):
            # bfloat16 is just used on GPU, so it's not supported on numpy
            if dtype == torch.bfloat16:
                src = src.float()
            out = torch.from_numpy(np.take(src.cpu().numpy(), idx.cpu().numpy(), axis=dim))
            if dtype == torch.bfloat16:
                out = out.to(device=device, dtype=dtype)
            return out

        for src_contig, idx_contig in product([True, False], repeat=2):
            for other_sizes in ((), (4, 5)):
                for dim in range(len(other_sizes)):
                    src = make_arg(other_sizes, num_src, dim, src_contig)
                    idx = make_tensor((num_out,), device, dtype=torch.int64,
                                      low=0, high=num_src, noncontiguous=not idx_contig)
                    out = torch.index_select(src, dim, idx)
                    out2 = ref_index_select(src, dim, idx)
                    self.assertEqual(out, out2)

        for idx_type in (torch.int32, torch.int64):
            other_sizes = (3, 2)
            dim = 1
            src = make_arg(other_sizes, num_src, dim, True)
            idx = make_tensor((num_out,), device, dtype=idx_type, low=0, high=num_src, noncontiguous=False)
            out = torch.index_select(src, dim, idx)
            out2 = ref_index_select(src, dim, idx)
            self.assertEqual(out, out2)

        # Create the 4 possible combinations of scalar sizes for index / source
        scalars = ((make_tensor(size_s, device, dtype),
                    torch.zeros(size_i, dtype=torch.int64, device=device))
                   for size_s, size_i in product([(), (1,)], repeat=2))
        for source, idx in scalars:
            out = source.index_select(0, idx)
            self.assertEqual(out.item(), source.item())

    @dtypes(*torch.testing.get_all_dtypes())
    def test_take(self, device, dtype):
        idx_size = (4,)

        make_arg = partial(make_tensor, device=device, dtype=dtype)
        make_idx = partial(make_tensor, low=0, device=device, dtype=torch.int64)

        def ref_take(src, idx):
            if dtype == torch.bfloat16:
                src = src.half()
            src = src.cpu().numpy()
            idx = idx.cpu().numpy()
            out = torch.from_numpy(np.take(src, idx)).to(device=device, dtype=dtype)
            return out

        for src_contig, idx_contig, idx_reshape in product([True, False], repeat=3):
            for src_size in ((5,), (4, 5)):
                src = make_arg(src_size, noncontiguous=not src_contig)
                idx = make_idx(idx_size, high=src.numel(), noncontiguous=not idx_contig)
                if idx_reshape:
                    idx = idx.reshape(2, 2)
                out = torch.take(src, idx)
                out2 = ref_take(src, idx)
                self.assertEqual(out, out2)

        # Create the 4 possible combinations of scalar sizes for source / index
        for size_s, size_i in product([(), (1,)], repeat=2):
            source = make_arg(size_s)
            idx = make_idx(size_i, high=1)
            out = source.take(idx)
            self.assertEqual(out.item(), source.item())

    # The bool instance does not work on GPU. See
    # https://github.com/pytorch/pytorch/issues/54317
    @dtypes(*torch.testing.get_all_dtypes(include_bool=False))
    def test_put(self, device, dtype):
        src_size = (4,)

        make_arg = partial(make_tensor, device=device, dtype=dtype)
        make_idx = partial(make_tensor, low=0, device=device, dtype=torch.int64)

        def ref_put(dst, idx, src, accumulate):
            new_dst = dst.clone(memory_format=torch.contiguous_format).view(-1)
            new_idx = idx.contiguous().view(-1)
            new_src = src.contiguous().view(-1)
            method = new_dst.index_add_ if accumulate else new_dst.index_copy_
            return method(0, new_idx, new_src).view_as(dst)

        for dst_contig, src_contig, idx_contig, idx_reshape, accumulate in product([True, False], repeat=5):
            for dst_size in ((5,), (4, 5)):
                dst = make_arg(dst_size, noncontiguous=not dst_contig)
                src = make_arg(src_size, noncontiguous=not src_contig)

                # If accumulate=True, `put_` should be deterministic regardless of the inputs on CPU
                # On CUDA it may not be, but the test has enough tolerance to account for this
                if accumulate:
                    idx = make_idx(src_size, high=dst.numel())
                else:
                    idx = torch.randperm(dst.numel(), dtype=torch.int64, device=device)[:src_size[0]]
                if not idx_contig:
                    idx = torch.repeat_interleave(idx, 2, dim=-1)[..., ::2]
                if idx_reshape:
                    idx = idx.reshape(2, 2)
                # out-place
                out = torch.put(dst, idx, src, accumulate)
                reference = ref_put(dst, idx, src, accumulate)
                self.assertEqual(out, reference)

                # in-place
                dst.put_(idx, src, accumulate)
                self.assertEqual(dst, reference)

        # Create the 8 possible combinations of scalar sizes for target / index / source
        scalars = ((make_arg(size_t),
                    make_idx(size_i, high=1),
                    make_arg(size_s))
                   for size_t, size_i, size_s in product([(), (1,)], repeat=3))
        for (dest, idx, source), accumulate in product(scalars, [True, False]):
            dest_init = dest.clone()
            # out-place
            out = torch.put(dest, idx, source, accumulate=accumulate)
            # in-place
            dest1 = dest.clone()
            dest1.put_(idx, source, accumulate=accumulate)
            for d in [out, dest1]:
                if accumulate:
                    self.assertEqual(d.item(), (dest_init + source).item())
                else:
                    self.assertEqual(d.item(), source.item())

        # Empty case
        dest = make_arg((3, 2))
        reference = dest.clone()
        idx = make_idx((0,), high=1)
        source = make_arg((0,))
        for accumulate in [True, False]:
            out = torch.put(dest, idx, source, accumulate=accumulate)
            self.assertEqual(out, reference)
            dest.put_(idx, source, accumulate=accumulate)
            self.assertEqual(dest, reference)

    # The bool instance does not work on GPU. See
    # https://github.com/pytorch/pytorch/issues/54317
    @dtypes(*torch.testing.get_all_dtypes(include_bool=False))
    def test_put_accumulate(self, device, dtype):
        # Test for parallel adds with accumulate == True
        low_precision = dtype == torch.half or dtype == torch.bfloat16
        # Fewer numbers to avoid overflow with low_precision
        # Grainsize is 3000 for the for_loop to be parallelized on CPU
        sizes = ((100,),) if low_precision else ((200,), (3002,))
        # Bfloat16 has a particularly bad performance here
        # This operation is nondeterministic on GPU, so we are generous with the rtol
        rtol, atol = (1e-1, 1e-2) if low_precision else (1e-3, 1e-4)

        make_arg = partial(make_tensor, low=-2, high=3, device=device, dtype=dtype)
        # Dump everything into the 0-th position
        make_idx = partial(torch.zeros, device=device, dtype=torch.int64)
        args = ((make_idx(size), make_arg(size)) for size in sizes)

        for idx, source in args:
            orig = make_arg((1,))
            out = orig.put(idx, source, accumulate=True)
            self.assertEqual(out, orig + source.sum(), rtol=rtol, atol=atol)

    def test_take_empty(self, device):
        for input_shape in [(0,), (0, 1, 2, 0), (1, 2, 3)]:
            for indices_shape in [(0,), (0, 1, 2, 0)]:
                input = torch.empty(input_shape, device=device)
                indices = torch.empty(indices_shape, dtype=torch.int64, device=device)
                self.assertEqual(indices, torch.take(input, indices), exact_dtype=False)

    def test_put_empty(self, device):
        for dst_shape in [(0,), (0, 1, 2, 0), (1, 2, 3)]:
            for indices_shape in [(0,), (0, 1, 2, 0)]:
                for accumulate in [False, True]:
                    dst = torch.randn(dst_shape, device=device)
                    indices = torch.empty(indices_shape, dtype=torch.int64, device=device)
                    src = torch.randn(indices_shape, device=device)
                    self.assertEqual(dst, dst.put_(indices, src, accumulate=accumulate))

    def scatter_allow_reduce(self, device, dtype, reduceop):
        device_type = torch.device(device).type
        return device_type != 'cuda' or (reduceop == 'multiply' and dtype.is_floating_point)

    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
    # So, we are skipping it here.
    @dtypes(*(torch.testing.get_all_fp_dtypes(include_bfloat16=False, include_half=False) +
              torch.testing.get_all_complex_dtypes()))
    @dtypesIfCPU(*torch.testing.get_all_dtypes())
    @dtypesIfCUDA(*torch.testing.get_all_dtypes())
    def test_scatter_reduce_operations_to_large_input(self, device, dtype):
        index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
        test_data = [
            (torch.zeros(4, 4, device=device, dtype=dtype),
             torch.ones(2, 2, device=device, dtype=dtype),
             torch.tensor([[0, 0, 0, 0],
                           [1, 0, 0, 0],
                           [1, 0, 0, 0],
                           [0, 0, 0, 0]],
                          device=device, dtype=dtype), "add"),
            (torch.tensor([2], device=device, dtype=dtype).repeat(4, 4),
             torch.tensor([6], device=device, dtype=dtype).repeat(2, 2),
             torch.tensor([[2, 2, 2, 2],
                           [12, 2, 2, 2],
                           [12, 2, 2, 2],
                           [2, 2, 2, 2]], device=device, dtype=dtype), "multiply"),
        ]

        for input, src, result, operation in test_data:
            if not self.scatter_allow_reduce(device, dtype, operation):
                continue
            input.scatter_(0, index, src, reduce=operation)
            self.assertEqual(input, result)
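    # Worked example for the "add" case above (illustrative): with
    # index = [[1], [2]], scatter_(0, index, src, reduce="add") only visits
    # index's shape, so input[1][0] += src[0][0] and input[2][0] += src[1][0],
    # turning zeros(4, 4) into the expected tensor with ones at rows 1 and 2
    # of column 0.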
    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
    # So, we are skipping it here.
    @dtypes(*(torch.testing.get_all_fp_dtypes(include_bfloat16=False, include_half=False) +
              torch.testing.get_all_complex_dtypes()))
    @dtypesIfCPU(*torch.testing.get_all_dtypes())
    @dtypesIfCUDA(*torch.testing.get_all_dtypes())
    def test_scatter_reduce_scalar(self, device, dtype):
        index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
        test_data = [
            (torch.zeros(4, 4, device=device, dtype=dtype), 1,
             torch.tensor([[0, 0, 0, 0],
                           [1, 0, 0, 0],
                           [1, 0, 0, 0],
                           [0, 0, 0, 0]],
                          device=device, dtype=dtype), "add"),
            (torch.tensor([2], device=device, dtype=dtype).repeat(4, 4), 2,
             torch.tensor([[2, 2, 2, 2],
                           [4, 2, 2, 2],
                           [4, 2, 2, 2],
                           [2, 2, 2, 2]], device=device, dtype=dtype), "multiply"),
        ]

        for input, src, result, operation in test_data:
            if not self.scatter_allow_reduce(device, dtype, operation):
                continue
            input.scatter_(0, index, src, reduce=operation)
            self.assertEqual(input, result)

    # TODO: remove this after scatter_add_ is deprecated.
    def test_scatter_add_non_unique_index(self, device):
        height = 2
        width = 65536
        input = torch.ones(height, width, device=device)
        index = torch.zeros(height, width, dtype=torch.long, device=device)
        src = torch.ones(height, width, device=device)
        input.scatter_add_(0, index, src)

        self.assertEqual(input,
                         torch.tensor([[3], [1]],
                                      device=device, dtype=torch.float32).repeat(1, width))

    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
    # So, we are skipping it here.
    @dtypes(*(torch.testing.get_all_fp_dtypes(include_bfloat16=False, include_half=False) +
              torch.testing.get_all_complex_dtypes()))
    @dtypesIfCPU(*torch.testing.get_all_dtypes())
    @dtypesIfCUDA(*torch.testing.get_all_dtypes())
    def test_scatter_reduce_non_unique_index(self, device, dtype):
        height = 2
        width = 2
        index = torch.zeros(height, width, dtype=torch.long, device=device)
        test_data = [
            (torch.ones(height, width, device=device, dtype=dtype),
             torch.ones(height, width, device=device, dtype=dtype),
             torch.tensor([[3], [1]],
                          device=device, dtype=dtype).repeat(1, width), "add"),
            (torch.tensor([2], device=device, dtype=dtype).repeat(height, width),
             torch.tensor([2], device=device, dtype=dtype).repeat(height, width),
             torch.tensor([[8], [2]],
                          device=device, dtype=dtype).repeat(1, width), "multiply"),
        ]

        for input, src, result, operation in test_data:
            if not self.scatter_allow_reduce(device, dtype, operation):
                continue
            input.scatter_(0, index, src, reduce=operation)
            self.assertEqual(input, result,
                             msg=f"result: {result} input: {input} method: {str(operation)}")

    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
    # So, we are skipping it here.
    @onlyCUDA
    @dtypesIfCUDA(*(torch.testing.get_all_complex_dtypes() +
                    torch.testing.get_all_int_dtypes()))
    def test_scatter_reduce_multiply_unsupported_dtypes(self, device, dtype):
        height = 2
        width = 2
        index = torch.zeros(height, width, dtype=torch.long, device=device)
        input = torch.ones(height, width, device=device, dtype=dtype)
        src = torch.ones(height, width, device=device, dtype=dtype)
        with self.assertRaises(RuntimeError):
            input.scatter_(0, index, src, reduce="multiply")

    def test_scatter_to_large_input(self, device):
        input = torch.zeros(4, 4, device=device)
        src = torch.ones(2, 2, device=device)
        index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
        input.scatter_(0, index, src)
        self.assertEqual(input, torch.tensor([[0, 0, 0, 0],
                                              [1, 0, 0, 0],
                                              [1, 0, 0, 0],
                                              [0, 0, 0, 0]], device=device, dtype=torch.float32))

    def test_scatter_add_to_large_input(self, device):
        input = torch.zeros(4, 4, device=device)
        src = torch.ones(2, 2, device=device)
        index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
        input.scatter_add_(0, index, src)
        self.assertEqual(input, torch.tensor([[0, 0, 0, 0],
                                              [1, 0, 0, 0],
                                              [1, 0, 0, 0],
                                              [0, 0, 0, 0]], device=device, dtype=torch.float32))

    def test_scatter_bool(self, device):
        x = torch.tensor([[True, True, True], [True, True, True]], device=device)
        res = torch.zeros(3, 3, dtype=torch.bool, device=device)
        res = res.scatter_(0, torch.tensor([[0, 1, 2], [0, 1, 2]], device=device), x)
        self.assertEqual(res, torch.tensor([[True, False, False],
                                            [False, True, False],
                                            [False, False, True]], device=device))

    def test_scatter_add_bool(self, device):
        x = torch.tensor([[True, True, True, True, True], [True, True, True, True, True]], device=device)
        res = torch.zeros(3, 5, dtype=torch.bool, device=device)
        res = res.scatter_add_(0, torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]], device=device), x)
        self.assertEqual(res, torch.tensor([[True, True, True, True, True],
                                            [False, True, False, True, False],
                                            [True, False, True, False, True]], device=device))

    @onlyOnCPUAndCUDA
    @dtypes(*torch.testing.get_all_dtypes())
    def test_masked_scatter(self, device, dtype):
        dt = dtype
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            for maskType in [torch.uint8, torch.bool]:
                num_copy, num_dest = 3, 10
                dest = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dt, device=device)
                dest2 = dest.clone()
                dest_ones = dest.clone()
                dest_ones_expected = dest.clone()
                src = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=dt, device=device)
                src_ones = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=dt, device=device)
                mask = torch.tensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0), dtype=maskType, device=device)

                if dt == torch.bool:
                    # torch.bool is a special case and is being tested
                    # in a separate test
                    return

                dest.masked_scatter_(mask, src)
                j = 0
                for i in range(num_dest):
                    if mask[i]:
                        dest2[i] = src[j]
                        dest_ones_expected[i] = src_ones[j]
                        j += 1
                self.assertEqual(dest, dest2, atol=0, rtol=0)

                dest_ones.masked_scatter_(mask, src_ones)
                self.assertEqual(dest_ones, dest_ones_expected, atol=0, rtol=0)

                # Bound checking in CUDA is done inside a kernel
                # in order to avoid synchronization, but this means
                # we can not clear the failures. So there is no way
                # to test it then recover.
                if self.device_type != 'cuda':
                    # make src smaller. this should fail
                    src = torch.zeros(num_copy - 1, dtype=dt, device=device)
                    with self.assertRaises(RuntimeError):
                        dest.masked_scatter_(mask, src)

                # empty tensor
                dest = torch.empty((5, 0, 5), dtype=dt, device=device)
                mask = torch.ones_like(dest, dtype=maskType, device=device)
                src = torch.empty((0,), dtype=dt, device=device)
                dest.masked_scatter_(mask, src)

                dest = torch.empty((5, 0, 5), dtype=dt, device=device)
                mask = torch.ones((5, 1, 5), dtype=maskType, device=device)
                src = torch.empty((0,), dtype=dt, device=device)
                dest.masked_scatter_(mask, src)

        if self.device_type != 'cuda':
            self.assertEqual(len(w), 5)
        else:
            self.assertEqual(len(w), 4)

        warn = 'masked_scatter_ received a mask with dtype torch.uint8,'
        for wi in w:
            self.assertEqual(str(wi.message)[0:55], str(warn))

    def test_masked_scatter_bool_tensor(self, device):
        src = torch.tensor([True, True, True], device=device)
        dst = torch.tensor([False, False, False], device=device)
        mask = torch.tensor([False, True, False], device=device)

        dst.masked_scatter_(mask, src)
        self.assertEqual(dst, torch.tensor([False, True, False], device=device))

        mask = torch.tensor([True, False, True], device=device)
        dst = dst.masked_scatter(mask, src)
        self.assertEqual(dst, torch.tensor([True, True, True], device=device))

    @onlyCUDA
    @largeTensorTest('30GB')
    def test_masked_scatter_large_tensor(self, device):
        t_cpu = torch.empty(2**31 + 1, dtype=torch.bool).random_()
        t = t_cpu.to(device)
        result_cpu = t_cpu.masked_scatter(t_cpu, t_cpu)
        result = t.masked_scatter(t, t)
        self.assertEqual(result, result_cpu)

    @dtypes(*torch.testing.get_all_dtypes())
    def test_masked_select(self, device, dtype):
        if device == 'cpu':
            warn = 'masked_select received a mask with dtype torch.uint8,'
        else:
            warn = 'indexing with dtype torch.uint8 is now deprecated, pl'
        for maskType in [torch.uint8, torch.bool]:
            num_src = 10
            src = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=dtype, device=device)
            mask = torch.randint(2, (num_src,), device=device, dtype=maskType)

            with warnings.catch_warnings(record=True) as w:
                dst = src.masked_select(mask)
                if maskType is torch.uint8:
                    self.assertEqual(len(w), 1)
                    self.assertEqual(str(w[0].message)[0:53], str(warn))
            dst2 = []
            for i in range(num_src):
                if mask[i]:
                    dst2 += [src[i]]
            self.assertEqual(dst, torch.tensor(dst2), atol=0, rtol=0)

            dst3 = torch.empty(0, device=device, dtype=dtype)
            torch.masked_select(src, mask, out=dst3)
            self.assertEqual(dst3, torch.tensor(dst2, dtype=dst3.dtype), atol=0, rtol=0)

        # Since half on CPU is not supported, need to skip the remaining test cases
        if dtype == torch.half and torch.device(device).type == 'cpu':
            return

        # Ensure that masks are expanded to match tensor properly
        a = torch.rand(100, 100, device=device).mul(100).to(dtype)
        mask_first_el_each_row = torch.zeros(100, device=device, dtype=torch.bool)
        mask_first_el_each_row[0] = True
        a_masked = a.masked_select(mask_first_el_each_row)
        self.assertEqual(a_masked, a[:, 0])

        mask_first_row = torch.zeros(100, 1, device=device, dtype=torch.bool)
        mask_first_row[0][0] = True
        a_masked = a.masked_select(mask_first_row)
        self.assertEqual(a_masked, a[0, :])

        # Ensure that tensor is expanded to match mask properly
        a = torch.rand(100, device=device).mul(100).to(dtype)
        mask_copy_3_times = torch.tensor([[True], [True], [False], [True]], device=device)
        a_masked = a.masked_select(mask_copy_3_times)
        self.assertEqual(a_masked, a.unsqueeze(0).expand(3, 100).flatten())

    def test_masked_select_discontiguous(self, device):
        for size in (10, 200):
            vals = torch.rand(size, size, device=device)
            mask = torch.full((size, size), False, dtype=torch.bool, device=device)
False, dtype=torch.bool, device=device) mask[:, ::2] = True vals_list = (vals, vals.t()) mask_list = (mask, mask.t()) out_dc = torch.empty(size * size, device=device)[::2] for v, m in product(vals_list, mask_list): if m.is_contiguous(): expected = v[:, ::2].clone().view(-1) else: expected = v[::2].clone().view(-1) out = torch.masked_select(v, m) self.assertEqual(out, expected, atol=0, rtol=0) torch.masked_select(v, m, out=out_dc) self.assertEqual(out_dc, expected, atol=0, rtol=0) @dtypes(*product(torch.testing.get_all_dtypes(), (torch.uint8, torch.bool))) def test_masked_fill(self, device, dtypes): dtype = dtypes[0] mask_dtype = dtypes[1] with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") num_dest = 10 dst = torch.zeros(num_dest, dtype=dtype) mask = torch.randint(2, (num_dest,), dtype=mask_dtype) val = random.random() dst2 = dst.clone() dst.masked_fill_(mask, val) for i in range(num_dest): if mask[i]: dst2[i] = val self.assertEqual(dst, dst2, atol=0, rtol=0) # test non-contiguous case dst = ((torch.randn(num_dest, num_dest, num_dest) * 10).to(dtype)).permute((2, 0, 1)) dst2 = dst.contiguous() if dtype.is_complex: mask = dst.abs() > 0 else: mask = dst > 0 self.assertTrue(not dst.is_contiguous()) self.assertTrue(dst2.is_contiguous()) dst.masked_fill_(mask.to(mask_dtype), val) dst2.masked_fill_(mask.to(mask_dtype), val) self.assertEqual(dst, dst2, atol=0, rtol=0) if mask_dtype == torch.uint8: self.assertEqual(len(w), 3) warn = 'masked_fill_ received a mask with dtype torch.uint8,' for wi in w: self.assertEqual(str(wi.message)[0:52], str(warn)) else: self.assertEqual(len(w), 0) def test_masked_fill_bool_tensor(self, device): dst = torch.tensor([True, False, True], device=device) mask = torch.tensor([False, True, False], device=device) dst.masked_fill_(mask, True) self.assertEqual(dst, torch.tensor([True, True, True], device=device)) dst = dst.masked_fill(mask, False) self.assertEqual(dst, torch.tensor([True, False, True], device=device)) def test_tensor_shape_empty(self, device): x = torch.randn((0, 1, 3, 0), device=device) # flatten self.assertEqual((0,), torch.flatten(x, 0, 3).shape) self.assertEqual((0, 0), torch.flatten(x, 0, 2).shape) self.assertEqual((0, 3, 0), torch.flatten(x, 1, 2).shape) # squeeze, unsqueeze self.assertEqual((0, 1, 1, 3, 0), torch.unsqueeze(x, 1).shape) self.assertEqual((0, 3, 0), torch.squeeze(x, 1).shape) self.assertEqual((0, 3, 0), torch.squeeze(x).shape) # transpose, t self.assertEqual((0, 0, 3, 1), torch.transpose(x, 1, 3).shape) y = torch.randn((5, 0), device=device) self.assertEqual((0, 5), y.t().shape) # select self.assertEqual((0, 1, 0), torch.select(x, 2, 2).shape) # repeat, permute self.assertEqual((9, 0, 5, 6, 0), x.repeat(9, 7, 5, 2, 3).shape) self.assertEqual((3, 0, 0, 1), x.permute(2, 3, 0, 1).shape) # diagonal, diagflat self.assertEqual((0,), torch.diagonal(torch.randn((5, 0), device=device)).shape) self.assertEqual((0,), torch.diagonal(torch.randn((0, 5), device=device)).shape) # off the end offsets are valid self.assertEqual((0,), torch.diagonal(torch.randn((5, 0), device=device), offset=1).shape) self.assertEqual((0,), torch.diagonal(torch.randn((0, 5), device=device), offset=1).shape) # check non-zero sized offsets off the end self.assertEqual((5, 6, 0), torch.diagonal(torch.randn((3, 4, 5, 6), device=device), offset=45252).shape) self.assertEqual((5, 6, 0), torch.diagonal(torch.randn((3, 4, 5, 6), device=device), offset=-45252).shape) self.assertEqual((0, 0), torch.diagflat(torch.tensor([], device=device)).shape) 
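        # a nonzero offset forces diagflat to allocate room for the offset even
        # when the input is empty, so the results below are 1x1 zero matrices
        # rather than 0x0 ones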
        self.assertEqual(torch.zeros(1, 1), torch.diagflat(torch.tensor([], device=device), offset=1))
        self.assertEqual((0, 0), torch.diagflat(torch.tensor([[]], device=device)).shape)
        self.assertEqual(torch.zeros(1, 1), torch.diagflat(torch.tensor([[]], device=device), offset=1))

        # stack, split, chunk
        self.assertEqual((4, 0, 1, 3, 0), torch.stack((x, x, x, x)).shape)
        self.assertEqual([(0, 1, 3, 0)],
                         [z.shape for z in torch.chunk(x, 1, dim=0)])

        self.assertEqual([(0, 1, 3, 0), ] * 3, [z.shape for z in torch.chunk(x, 3, dim=0)])
        self.assertEqual([(0, 1, 1, 0), ] * 3, [z.shape for z in torch.chunk(x, 3, dim=2)])

        # NOTE: split_with_sizes behaves differently than NumPy in that it
        # takes sizes rather than offsets
        self.assertEqual([(0, 1, 0, 0), (0, 1, 1, 0), (0, 1, 2, 0)],
                         [z.shape for z in torch.split(x, (0, 1, 2), dim=2)])

        self.assertRaises(RuntimeError, lambda: torch.split(x, 0, dim=1))
        # This is strange because the split size is larger than the dim size, but consistent with
        # how split handles that case generally (when no 0s are involved).
        self.assertEqual([(0, 1, 3, 0)], [z.shape for z in torch.split(x, 1, dim=0)])
        self.assertEqual([(0, 1, 3, 0)], [z.shape for z in torch.split(x, 0, dim=0)])

    # functions that operate over a dimension but don't reduce.
    def test_dim_function_empty(self, device):
        shape = (0, 1, 2, 0)
        x = torch.randn(shape, device=device)

        # size stride
        self.assertEqual(0, x.size(3))
        self.assertEqual(2, x.size(2))
        self.assertEqual(2, x.stride(0))
        self.assertEqual(1, x.stride(2))

        self.assertEqual(x, torch.nn.functional.glu(x, 0))
        self.assertEqual((0, 1, 1, 0), torch.nn.functional.glu(x, 2).shape)

        # softmax, logsoftmax
        self.assertEqual(x, torch.nn.functional.softmax(x, 0))
        self.assertEqual(x, torch.nn.functional.softmax(x, 2))
        self.assertEqual(x, torch.nn.functional.softmax(x, 3))

        self.assertEqual(x, torch.nn.functional.log_softmax(x, 0))
        self.assertEqual(x, torch.nn.functional.log_softmax(x, 2))
        self.assertEqual(x, torch.nn.functional.log_softmax(x, 3))

        # cumsum, cumprod, cummax, cummin
        self.assertEqual(shape, torch.cumsum(x, 0).shape)
        self.assertEqual(shape, torch.cumsum(x, 2).shape)
        self.assertEqual(shape, torch.cumprod(x, 0).shape)
        self.assertEqual(shape, torch.cumprod(x, 2).shape)
        self.assertEqual(shape, torch.cummax(x, 0)[0].shape)
        self.assertEqual(shape, torch.cummax(x, 2)[0].shape)
        self.assertEqual(shape, torch.cummin(x, 0)[0].shape)
        self.assertEqual(shape, torch.cummin(x, 2)[0].shape)
        self.assertEqual(shape, torch.logcumsumexp(x, 0).shape)
        self.assertEqual(shape, torch.logcumsumexp(x, 2).shape)

        # flip
        self.assertEqual(x, x.flip(0))
        self.assertEqual(x, x.flip(2))

        # roll
        self.assertEqual(x, x.roll(0, 1).roll(0, -1))
        self.assertEqual(x, x.roll(1, x.size(1)))
        self.assertEqual(x, x.roll(1))
        self.assertEqual(x, x.roll((1, 1), (3, 1)))

        # unbind
        self.assertEqual((), x.unbind(0))
        self.assertEqual((torch.empty((0, 1, 0), device=device), torch.empty((0, 1, 0), device=device)),
                         x.unbind(2))

        # cross
        y = torch.randn((0, 1, 3, 0), device=device)
        self.assertEqual(y.shape, torch.cross(y, y).shape)

        # renorm
        self.assertEqual(shape, torch.renorm(x, 1, 0, 5).shape)
        self.assertEqual(shape, torch.renorm(x, 1, 2, 5).shape)

        # sort
        self.assertEqual([shape, shape], [z.shape for z in torch.sort(x, dim=0)])
        self.assertEqual([shape, shape], [z.shape for z in torch.sort(x, dim=2)])

        # topk
        self.assertEqual([shape, shape], [z.shape for z in torch.topk(x, 0, dim=0)])
        self.assertEqual([(0, 1, 1, 0), (0, 1, 1, 0)], [z.shape for z in torch.topk(x, 1, dim=2)])

        y = torch.randn((2, 3, 4), device=device)
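        # topk with k=0 is legal even on a non-empty input: the values and
        # indices come back empty along the selected (here: last) dimension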
        self.assertEqual([(2, 3, 0), (2, 3, 0)], [z.shape for z in torch.topk(y, 0)])

        # gather
        self.assertEqual(shape, torch.gather(x, 0, torch.empty(shape, dtype=torch.int64, device=device)).shape)
        self.assertEqual(shape, torch.gather(x, 2, torch.empty(shape, dtype=torch.int64, device=device)).shape)
        larger_shape = torch.empty((0, 1, 3, 0), dtype=torch.int64, device=device)
        self.assertEqual(larger_shape.shape, torch.gather(x, 2, larger_shape).shape)
        smaller_shape = torch.empty((0, 1, 0, 0), dtype=torch.int64, device=device)
        self.assertEqual(smaller_shape.shape, torch.gather(x, 2, smaller_shape).shape)
        y = torch.randn((2, 3, 4), device=device)
        self.assertEqual((0, 3, 4),
                         torch.gather(y, 0, torch.empty((0, 3, 4), dtype=torch.int64, device=device)).shape)

        # scatter, scatter_add
        for dim in [0, 2]:
            y = torch.randn(shape, device=device)
            y_src = torch.randn(shape, device=device)
            ind = torch.empty(shape, dtype=torch.int64, device=device)
            self.assertEqual(shape, y.scatter_(dim, ind, y_src).shape)
            self.assertEqual(shape, y.scatter_add_(dim, ind, y_src).shape)

        z = torch.randn((2, 3, 4), device=device)
        z_src = torch.randn((2, 3, 4), device=device)
        self.assertEqual(z, z.scatter_(2, torch.empty((2, 3, 0), dtype=torch.int64, device=device), z_src))
        self.assertEqual(z, z.scatter_add_(2, torch.empty((2, 3, 0), dtype=torch.int64, device=device), z_src))

        # index_fill, index_copy, index_add
        c = x.clone()
        c_clone = c.clone()
        ind_empty = torch.tensor([], dtype=torch.int64, device=device)
        ind_01 = torch.tensor([0, 1], dtype=torch.int64, device=device)
        self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1))
        self.assertEqual(c_clone, c.index_fill_(2, ind_empty, -1))
        self.assertEqual(c_clone, c.index_fill_(2, torch.tensor([0, 1], dtype=torch.int64, device=device), -1))
        self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2, 0), device=device)))
        self.assertEqual(c_clone, c.index_copy_(2, ind_empty, torch.empty((0, 1, 0, 0), device=device)))
        self.assertEqual(c_clone, c.index_copy_(2, ind_01, torch.empty((0, 1, 2, 0), device=device)))
        self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2, 0), device=device)))
        self.assertEqual(c_clone, c.index_add_(2, ind_empty, torch.empty((0, 1, 0, 0), device=device)))
        self.assertEqual(c_clone, c.index_add_(2, ind_01, torch.empty((0, 1, 2, 0), device=device)))

        c = torch.randn((0, 1, 2), device=device)
        c_clone = c.clone()
        self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1))
        self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2), device=device)))
        self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2), device=device)))
        self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1))
        self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2), device=device)))
        self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2), device=device)))

        # index fill/copy/add non-empty
        z = torch.randn((2, 3, 4), device=device)
        self.assertEqual(z, z.index_fill_(0, ind_empty, -1))
        z = torch.randn((2, 3, 4), device=device)
        self.assertEqual(z, z.index_copy_(0, ind_empty, torch.empty((0, 3, 4), device=device)))
        z = torch.randn((2, 3, 4), device=device)
        self.assertEqual(z, z.index_add_(0, ind_empty, torch.empty((0, 3, 4), device=device)))

        # index_select
        self.assertEqual(x, x.index_select(0, ind_empty))
        self.assertEqual((0, 1, 0, 0), x.index_select(2, ind_empty).shape)
        self.assertEqual(x, x.index_select(2, ind_01))
        z = torch.randn((2, 3, 4), device=device)  # non-empty
        self.assertEqual((0, 3, 4), z.index_select(0, ind_empty).shape)
        c = torch.randn((0, 1, 2), device=device)
        self.assertEqual(c, c.index_select(0, ind_empty))
        c = torch.randn((0, 1, 2), device=device)
        self.assertEqual(c, c.index_select(0, ind_empty))

    def _brute_pdist(self, inp, p=2):
        """Computes the same as torch.pdist using primitives"""
        n = inp.shape[-2]
        k = n * (n - 1) // 2
        if k == 0:
            # torch complains about empty indices
            return torch.empty(inp.shape[:-2] + (0,), dtype=inp.dtype, device=inp.device)
        square = torch.norm(inp[..., None, :] - inp[..., None, :, :], p=p, dim=-1)
        unroll = square.view(square.shape[:-2] + (n * n,))
        inds = torch.ones(k, dtype=torch.int)
        inds[torch.arange(n - 1, 1, -1, dtype=torch.int).cumsum(0)] += torch.arange(2, n, dtype=torch.int)
        return unroll[..., inds.cumsum(0)]

    def _pdist_single(self, shape, device, p, dtype, trans, grad_check=False):
        x = torch.randn(shape, dtype=dtype, device=device)
        if trans:
            x.transpose_(-2, -1)
        if grad_check:
            x.requires_grad_()
            y = x.detach().clone().requires_grad_()
        else:
            y = x
        actual = torch.pdist(x, p=p)
        expected = self._brute_pdist(y, p=p)
        self.assertEqual(expected.shape, actual.shape)
        self.assertEqual(expected, actual)
        if grad_check and expected.size() != torch.Size([0]):
            g0 = torch.rand_like(actual)
            actual.backward(g0)
            expected.backward(g0)
            self.assertEqual(x.grad, y.grad)

    @slowTest
    def test_pdist_norm_forward(self, device):
        for shape in [(4, 5), (3, 2), (2, 1), (1500, 1)]:
            for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                for trans in [False, True]:
                    for dtype in [torch.float32, torch.float64]:
                        self._pdist_single(shape, device, p, dtype, trans, grad_check=False)

        # do a simplified comparison with big inputs, see:
        # https://github.com/pytorch/pytorch/issues/15511
        for dtype in [torch.float32, torch.float64]:
            self._pdist_single((1000, 2), device, 2, dtype, trans=False, grad_check=False)

    @slowTest
    def test_pdist_norm_backward(self, device):
        for shape in [(4, 5), (3, 2), (2, 1), (1500, 1)]:
            for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                for trans in [False, True]:
                    self._pdist_single(shape, device, p, torch.float64, trans, grad_check=True)

    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "sandcastle OOM with current tpx gpu/re configuration")
    @skipIfRocm
    def test_pdist_norm_large(self, device):
        # use dim0>=46342 for forward, see:
        # https://github.com/pytorch/pytorch/issues/30583
        # Compare output using GPU with the CPU implementation, as brute_pdist uses too much memory
        if 'cuda' in device:
            x = torch.randn(50000, 1, dtype=torch.float32)
            expected_cpu = torch.pdist(x, p=2)
            actual_gpu = torch.pdist(x.to(device), p=2)
            self.assertEqual(expected_cpu, actual_gpu.cpu())

    @onlyOnCPUAndCUDA
    @dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')))
    @dtypes(*set(torch.testing.get_all_math_dtypes('cpu')))
    def test_addcdiv(self, device, dtype):
        # Returns floating or integral scalar corresponding to dtype
        def _number(floating, integer, dtype):
            if dtype in [torch.half, torch.float, torch.double, torch.bfloat16]:
                return floating
            elif dtype in [torch.cfloat, torch.cdouble]:
                return floating * (1 + 1j)
            else:
                return integer

        def non_zero_rand(size, dtype, device):
            if dtype.is_floating_point or dtype.is_complex:
                a = torch.rand(size=size, dtype=dtype, device=device)
            elif dtype == torch.uint8:
                a = torch.randint(1, 5, size=size, dtype=dtype, device=device)
            else:
                a = torch.randint(-5, 5, size=size, dtype=dtype, device=device)
            return a + (a == 0).to(dtype)

        def _test_addcdiv():
            a = non_zero_rand((2, 2), dtype=dtype, device=device)
            b = non_zero_rand((2, 2), dtype=dtype, device=device)
            c = non_zero_rand((2, 2), dtype=dtype, device=device)
            alpha = _number(0.5, 3, dtype)
            expected = a + (alpha * b) / c
            actual = torch.addcdiv(a, b, c, value=alpha)
            self.assertEqual(expected, actual)
            with self.assertWarnsOnceRegex(
                    UserWarning, "This overload of addcdiv is deprecated"):
                self.assertEqual(actual, torch.addcdiv(a, alpha, b, c))

        if not (dtype.is_floating_point or dtype.is_complex):
            # Integer division with addcdiv is prohibited
            with self.assertRaises(RuntimeError):
                _test_addcdiv()
        else:
            _test_addcdiv()

        if self.device_type == 'cuda' and dtype == torch.half:
            a = torch.tensor([60000.0], device=device, dtype=dtype)
            b = torch.tensor([60000.0], device=device, dtype=dtype)
            c = torch.tensor([1.0], device=device, dtype=dtype)
            out = torch.addcmul(a, b, c, value=-2)
            self.assertTrue(not (out.isnan() or out.isinf()))

    def test_nullary_op_mem_overlap(self, device):
        ops = (
            ("random_", ()),
            ("uniform_", ()),
            ("cauchy_", ()),
            ("log_normal_", ()),
            ("exponential_", ()),
            ("geometric_", (0.5,)),
            ("normal_", ()),
        )

        x = torch.rand((1, 3)).expand((3, 3))
        for op, args in ops:
            with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                getattr(x, op)(*args)

    @dtypes(torch.double)
    def test_ternary_op_mem_overlap(self, device, dtype):
        ops = [
            ("addcmul", True, True, 'cpu'),
            ("addcmul", True, True, 'cuda'),
            ("addcdiv", True, True, 'cpu'),
            ("addcdiv", True, True, 'cuda'),
            ("lerp", True, True, 'cpu'),
            ("lerp", True, True, 'cuda')
        ]
        for (fn, has_input_output_mem_overlap_check,
             has_internal_mem_overlap_check, dev) in ops:
            if dev != device:
                continue
            out_op = getattr(torch, fn)
            inplace_op = getattr(torch.Tensor, fn + '_')
            self.check_internal_mem_overlap(
                inplace_op, 3, dtype, device,
                expected_failure=not has_internal_mem_overlap_check)
            self.ternary_check_input_output_mem_overlap(
                out_op, dev,
                expected_failure=not has_input_output_mem_overlap_check)

    @dtypes(torch.double)
    @onlyOnCPUAndCUDA
    def test_copy_mem_overlap(self, device, dtype):
        self.check_internal_mem_overlap(
            torch.Tensor.copy_, num_inputs=2, dtype=dtype, device=device)
        sz = 9
        doubles = torch.randn(2 * sz, dtype=dtype, device=device)
        self.unary_check_input_output_mem_overlap(
            doubles, sz, lambda input, out: out.copy_(input))

    @onlyOnCPUAndCUDA
    def test_index_add_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        y = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device)
        value = torch.rand((3,), device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.index_add_(0, ind, value)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.index_add_(0, ind, y[:3])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_add_(0, ind, ind.clone())
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_add_(0, ind.clone(), ind)

    @onlyOnCPUAndCUDA
    def test_index_copy_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        y = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device)
        value = torch.rand((3,), device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.index_copy_(0, ind, value)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.index_copy_(0, ind, y[:3])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_copy_(0, ind, ind.clone())
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_copy_(0, ind.clone(), ind)

    @onlyOnCPUAndCUDA
    def test_index_fill_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        y = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device)
        value = torch.rand((3,), device=device)

        with self.assertWarnsRegex(UserWarning, "index_fill_ on expanded tensors"):
            x.index_fill_(0, ind, 1.0)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_fill_(0, ind, 0)

    @onlyOnCPUAndCUDA
    def test_shift_mem_overlap(self, device):
        x = torch.rand(3, device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x[:-1] <<= x[1:]
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x[:-1] >>= x[1:]

    @onlyOnCPUAndCUDA
    def test_bernoulli_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))

        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.bernoulli_()
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.bernoulli_(p=0.1)
        p = torch.rand(6, device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.bernoulli_(p=p)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.bernoulli(torch.rand_like(x), out=x)

    @onlyOnCPUAndCUDA
    def test_put_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        y = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device)
        value = torch.rand((3,), device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.put_(ind, value)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.put_(ind[0], y[0])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.put_(ind, ind)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.put_(ind, y[:3])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.put_(ind, ind.clone())
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.put_(ind.clone(), ind)

    @onlyOnCPUAndCUDA
    def test_index_put_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        y = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device)
        value = torch.rand((3,), device=device)
        with self.assertWarnsRegex(UserWarning, 'expanded tensors'):
            x.index_put_((ind,), value)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.index_put_((ind,), y[0])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_put_((ind,), ind)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.index_put_((ind,), y[:3])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_put_((ind,), ind.clone())
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_put_((ind.clone(),), ind)

    @onlyOnCPUAndCUDA
    def test_masked_fill_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        mask = torch.tensor([True, False, True, True, False, False], device=device)
        with self.assertWarnsRegex(UserWarning, 'expanded tensors'):
            x.masked_fill_(mask, 0.)
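        # a 0-dim tensor fill value should trigger the same expanded-tensor
        # warning as the Python scalar above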
        fill_val = torch.tensor(0., device=device)
        with self.assertWarnsRegex(UserWarning, 'expanded tensors'):
            x.masked_fill_(mask, fill_val)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            mask[1:].masked_fill_(mask[:-1], False)

    @onlyOnCPUAndCUDA
    def test_masked_select_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((3,))
        y = torch.rand((6,), device=device)
        mask = torch.tensor([True, False, True, True, False, False], device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.masked_select(y, mask, out=x)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.masked_select(y, mask, out=y)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.masked_select(mask.clone(), mask, out=mask)

    @onlyOnCPUAndCUDA
    def test_masked_scatter_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        src = torch.rand((3,), device=device)
        mask = torch.tensor([True, False, True, True, False, False], device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.masked_scatter_(mask, src)

    @onlyOnCPUAndCUDA
    def test_index_select_mem_overlap(self, device):
        x = torch.rand((1, 6), device=device).expand((2, 6))
        y = torch.rand((3, 6), device=device)
        ind = torch.tensor([0, 1], dtype=torch.int64, device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.index_select(y, 1, ind, out=x)

    @onlyOnCPUAndCUDA
    def test_scatter_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        src = torch.rand((3,), device=device)
        ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.scatter_(0, ind, src)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            src.scatter_(0, ind, src)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.scatter_(0, ind, ind.clone())

    @onlyOnCPUAndCUDA
    def test_gather_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((3,))
        src = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.gather(src, 0, ind, out=x)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.gather(src, 0, ind, out=src)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.gather(ind.clone(), 0, ind[1:], out=ind[:1])

    @onlyOnCPUAndCUDA
    def test_take_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((3,))
        src = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.take(src, ind, out=x)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.take(src, ind, out=src)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.take(ind.clone(), ind[1:], out=ind[:-1])

    @onlyCUDA
    def test_multinomial_device_constrain(self, device):
        x = torch.empty(0, device="cpu")
        y = torch.empty(0, device=device)
        self.assertRaisesRegex(
            RuntimeError, "Expected all tensors to be on the same device",
            lambda: torch.multinomial(x, 2, out=y))

    @deviceCountAtLeast(2)
    @onlyCUDA
    def test_multinomial_gpu_device_constrain(self, devices):
        x = torch.empty(0, device=devices[0])
        y = torch.empty(0, device=devices[1])
        self.assertRaisesRegex(
            RuntimeError, "Expected all tensors to be on the same device",
            lambda: torch.multinomial(x, 2, out=y))

    @deviceCountAtLeast(2)
    @onlyCUDA
    def test_device_guard(self, devices):
        # verify that all operators with `device_guard: False` behave properly with multiple devices.
        # TODO: if we had operator introspection we could figure out this set of operators automatically...
        x = torch.randn((1, 2, 3), device=devices[1])
        y = torch.zeros((1, 3, 2), device=devices[1])
        scalar = torch.tensor(5, device=devices[1])

        # property ops
        torch.cudnn_is_acceptable(x)
        x.is_distributed()
        x.is_floating_point()
        x.is_complex()
        x.is_same_size(y)
        x.is_signed()
        x.size(0)
        x.stride(0)
        x.numel()
        x.is_set_to(y)
        x.data_ptr()
        scalar.is_nonzero()

        # sparse property ops
        y[0][1] = 5
        y_sparse = y.to_sparse()
        y_sparse.sparse_dim()
        y_sparse._dimI()
        y_sparse.dense_dim()
        y_sparse._dimV()
        y_sparse._nnz()
        y_sparse.is_coalesced()
        y_sparse._indices()
        y_sparse._values()
        y_sparse.indices()
        y_sparse.values()

        # in-place ops
        def inplace():
            return torch.randn((1, 2, 3), device=devices[1])
        inplace().as_strided_(y.size(), y.stride())
        inplace().resize_(y.size())
        inplace().squeeze_()
        inplace().squeeze_(0)
        inplace().unsqueeze_(2)
        inplace().transpose_(1, 2)
        inplace().squeeze_().t_()
        inplace().set_(x.storage())
        inplace().set_(x.storage(), x.storage_offset(), x.size(), x.stride())
        inplace().set_(x)
        inplace().set_()
        y_sparse._coalesced_(True)

        # shape modification
        x.as_strided(y.size(), y.stride())
        x.expand((5, 2, 3))
        x.expand_as(x)
        x.sum_to_size((1,))
        torch.broadcast_tensors(x, x)
        x.reshape((1, 3, 2))
        x.reshape_as(y)
        x.squeeze()
        x.squeeze(0)
        x.squeeze().t()
        x.transpose(1, 2)
        x.unsqueeze(2)
        x.view((1, 3, 2))
        x.view_as(y)

        # chunk, split, etc.
        x.chunk(2, dim=1)
        x.split(1, dim=2)
        x.split_with_sizes([1, 2], dim=2)
        x.unfold(dimension=2, size=1, step=1)

        x.narrow(1, 1, 1)
        x.select(1, 1)
        torch.isnan(x)
        torch.empty((1, 3, 2), out=y)
        torch.empty_like(x)
        torch.empty_like(x, dtype=torch.int64)

        # to
        x.to(x)
        x.to(y)
        x.to(x, copy=True)

    def test_is_signed(self, device):
        self.assertEqual(torch.IntTensor(5).to(device).is_signed(), True)
        self.assertEqual(torch.ByteTensor(5).to(device).is_signed(), False)
        self.assertEqual(torch.CharTensor(5).to(device).is_signed(), True)
        self.assertEqual(torch.FloatTensor(5).to(device).is_signed(), True)
        self.assertEqual(torch.HalfTensor(10).to(device).is_signed(), True)

    # Note - reports a leak of 512 bytes on CUDA device 1
    @deviceCountAtLeast(2)
    @skipCUDAMemoryLeakCheckIf(True)
    @onlyCUDA
    def test_tensor_set_errors_multigpu(self, devices):
        f_cuda0 = torch.randn((2, 3), dtype=torch.float32, device=devices[0])
        f_cuda1 = torch.randn((2, 3), dtype=torch.float32, device=devices[1])

        self.assertRaises(RuntimeError, lambda: f_cuda0.set_(f_cuda1.storage()))
        self.assertRaises(RuntimeError,
                          lambda: f_cuda0.set_(f_cuda1.storage(), 0, f_cuda1.size(), f_cuda1.stride()))
        self.assertRaises(RuntimeError, lambda: f_cuda0.set_(f_cuda1))

    @onlyCUDA
    def test_half_tensor(self, device):
        x = torch.randn(5, 5).half()
        self.assertEqual(x.to(device), x)

        xc = x.to(device)
        with tempfile.NamedTemporaryFile() as f:
            torch.save(xc, f)
            f.seek(0)
            xc2 = torch.load(f)
            self.assertIsInstance(xc2, type(xc))
            self.assertEqual(xc.float(), xc2.float())

    @onlyCUDA
    @deviceCountAtLeast(1)  # Note: Tests works with one but prefers more devices
    def test_serialization(self, devices):
        def _test_serialization(filecontext_lambda):
            t0 = torch.cuda.FloatTensor(5).fill_(1)
            with torch.cuda.device(devices[-1]):
                tn = torch.cuda.FloatTensor(3).fill_(2)
            torch.cuda.set_device(devices[0])
            b = (t0, tn)
            with filecontext_lambda() as f:
                torch.save(b, f)
                f.seek(0)
                c = torch.load(f)
                self.assertEqual(b, c, atol=0, rtol=0)
                u0, un = c
                self.assertEqual(str(u0.device), devices[0])
                self.assertEqual(str(un.device), devices[-1])

        _test_serialization(tempfile.NamedTemporaryFile)
        _test_serialization(BytesIOContext)

    def test_memory_format_preserved_after_permute(self, device):
        x = torch.randn(4, 3, 8, 8, device=device)
        nhwc = x.contiguous(memory_format=torch.channels_last)
        y = nhwc.permute(0, 1, 3, 2).permute(0, 1, 3, 2)
        self.assertTrue(y.is_contiguous(memory_format=torch.channels_last))

        x = torch.randn(4, 3, 8, 8, 8, device=device)
        ndhwc = x.contiguous(memory_format=torch.channels_last_3d)
        y = ndhwc.permute(0, 1, 4, 3, 2).permute(0, 1, 4, 3, 2)
        self.assertTrue(y.is_contiguous(memory_format=torch.channels_last_3d))

    def test_memory_format_propagation_rules(self, device):
        contiguous = torch.rand(10, 3, 5, 5, device=device)
        cl = torch.rand(10, 3, 5, 5, device=device).contiguous(memory_format=torch.channels_last)
        ambiguous = torch.rand(10, 3, 1, 1, device=device).contiguous(memory_format=torch.channels_last)
        self.assertTrue(ambiguous.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ambiguous.is_contiguous(memory_format=torch.contiguous_format))
        bias = torch.rand(1, 1, 1, 1, device=device).contiguous(memory_format=torch.channels_last)

        def _test_propagation_rules(self, contiguous, cl, ambiguous, bias):
            options = ((ambiguous, contiguous, torch.contiguous_format),
                       (ambiguous, cl, torch.channels_last),
                       (contiguous, ambiguous, torch.contiguous_format),
                       (contiguous, cl, torch.contiguous_format),
                       (cl, ambiguous, torch.channels_last),
                       (cl, contiguous, torch.channels_last),
                       (bias, cl, torch.channels_last),
                       (cl, bias, torch.channels_last),)

            for a, b, mf in options:
                result = a + b
                self.assertTrue(result.is_contiguous(memory_format=mf))

        _test_propagation_rules(self, contiguous, cl, ambiguous, bias)

        cl = cl.to(memory_format=torch.channels_last)
        ambiguous = ambiguous.to(memory_format=torch.channels_last)
        bias = bias.to(memory_format=torch.channels_last)

        _test_propagation_rules(self, contiguous, cl, ambiguous, bias)

        # test cases when strides matter in ambiguous tensors
        for mf in (torch.channels_last, torch.contiguous_format):
            ambiguous = torch.rand(10, 3, 1, 1, device=device).to(memory_format=mf)
            bias = torch.rand(3, 1, 1, device=device)
            result = ambiguous + bias
            self.assertEqual(ambiguous.stride(), result.stride())
            result = bias + ambiguous
            self.assertEqual(ambiguous.stride(), result.stride())
            result = ambiguous * 5
            self.assertEqual(ambiguous.stride(), result.stride())

    def test_memory_format_empty_like(self, device):
        def test_helper(x, memory_format):
            xc = x.contiguous(memory_format=memory_format)

            like = torch.empty_like(xc, memory_format=torch.preserve_format)
            self.assertFalse(like.is_contiguous())
            self.assertTrue(like.is_contiguous(memory_format=memory_format))

            like_x = torch.empty_like(x, memory_format=torch.preserve_format)
            self.assertTrue(like_x.is_contiguous())
            self.assertFalse(like_x.is_contiguous(memory_format=memory_format))

            like = torch.empty_like(x, memory_format=memory_format)
            self.assertFalse(like.is_contiguous())
            self.assertTrue(like.is_contiguous(memory_format=memory_format))

            like = torch.empty_like(xc, memory_format=torch.contiguous_format)
            self.assertTrue(like.is_contiguous())
            self.assertFalse(like.is_contiguous(memory_format=memory_format))

            like = torch.empty_like(xc)
            self.assertFalse(like.is_contiguous())
            self.assertTrue(like.is_contiguous(memory_format=memory_format))

            sparse = x.to_sparse()
            with self.assertRaises(RuntimeError):
                z = torch.empty_like(sparse, memory_format=torch.preserve_format)

        test_helper(torch.randn(4, 3, 8, 8, device=device), torch.channels_last)
        test_helper(torch.randn(4, 3, 8, 8, 8, device=device), torch.channels_last_3d)

    def test_memory_format_consistency(self, device):
        x = torch.randn(10, 3, 1, 1, device=device)
        x_rep = x.as_strided(x.size(), x.stride())
        self.assertEqual(x.size(), x_rep.size())
        self.assertEqual(x.stride(), x_rep.stride())
        self.assertEqual(x.is_contiguous(), x_rep.is_contiguous())
        self.assertEqual(x.is_contiguous(memory_format=torch.channels_last),
                         x_rep.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(
            x.is_contiguous(memory_format=torch.channels_last_3d),
            x_rep.is_contiguous(memory_format=torch.channels_last_3d))

    def test_memory_format_operators(self, device):
        def _chunk_op(x, y):
            x1, x2 = x.chunk(2, dim=1)
            return x1 + x2

        def _unsqueeze_op_add(x, y):
            return x[0].unsqueeze(0) + 3

        def _unsqueeze_op_clone(x, y):
            return x[0].unsqueeze(0).clone()

        def _test_helper(x, y, bias, memory_format):
            return_contig_fns = [
                lambda x, y: y + x,
                lambda x, y: y * x,
                lambda x, y: y.addcdiv(x, y, value=2),
                lambda x, y: y.addcmul(x, y, value=2),
            ]
            bias_fns = [
                lambda x, b: x + b,
                lambda x, b: b + x,
            ]
            fns = [
                lambda x, y: x.clone(),
                lambda x, y: x + 3,
                lambda x, y: 3 * x,
                lambda x, y: x + y,
                lambda x, y: x * y,
                lambda x, y: abs(x),
                lambda x, y: x.abs(),
                lambda x, y: x.abs_(),
                lambda x, y: x.acos(),
                lambda x, y: x.acos_(),
                lambda x, y: x.add(y, alpha=3),
                lambda x, y: x.add_(y, alpha=3),
                lambda x, y: x.addcdiv(y, y, value=2),
                lambda x, y: x.addcdiv_(y, y, value=2),
                lambda x, y: x.addcmul(y, y, value=2),
                lambda x, y: x.addcmul_(y, y, value=2),
                lambda x, y: x.acosh(),
                lambda x, y: x.acosh_(),
                lambda x, y: x.asinh(),
                lambda x, y: x.asinh_(),
                lambda x, y: x.atanh(),
                lambda x, y: x.atanh_(),
                lambda x, y: x.asin(),
                lambda x, y: x.asin_(),
                lambda x, y: x.atan(),
                lambda x, y: x.atan2(y),
                lambda x, y: x.atan2_(y),
                lambda x, y: x.ceil(),
                lambda x, y: x.ceil_(),
                lambda x, y: x.clamp(-1, 1),
                lambda x, y: x.cos(),
                lambda x, y: x.cosh(),
                lambda x, y: x.div(0.5),
                lambda x, y: x.div_(0.5),
                lambda x, y: x.div(y),
                lambda x, y: x.div_(y),
                lambda x, y: x.digamma(),
                lambda x, y: x.digamma_(),
                lambda x, y: x.erf(),
                lambda x, y: x.erfc(),
                lambda x, y: x.erfinv(),
                lambda x, y: x.erfinv_(),
                lambda x, y: x.exp(),
                lambda x, y: x.expm1(),
                lambda x, y: x.expm1_(),
                lambda x, y: x.floor(),
                lambda x, y: x.floor_(),
                lambda x, y: x.fmod(2),
                lambda x, y: x.frac(),
                lambda x, y: x.hypot(y),
                lambda x, y: x.hypot_(y),
                lambda x, y: x.i0(),
                lambda x, y: x.i0_(),
                lambda x, y: x.lerp(y, 0.5),
                lambda x, y: x.log(),
                lambda x, y: x.log_(),
                lambda x, y: x.log10(),
                lambda x, y: x.log10_(),
                lambda x, y: x.log1p(),
                lambda x, y: x.log1p_(),
                lambda x, y: x.log2(),
                lambda x, y: x.log2_(),
                lambda x, y: x.mul(3),
                lambda x, y: x.mul_(3),
                lambda x, y: x.neg(),
                lambda x, y: x.neg_(),
                lambda x, y: x.pow(3),
                lambda x, y: x.pow_(3),
                lambda x, y: x.pow(0.0),
                lambda x, y: x.pow(1.0),
                lambda x, y: x.reciprocal(),
                lambda x, y: x.remainder(2),
                lambda x, y: x.round(),
                lambda x, y: x.round_(),
                lambda x, y: x.rsqrt(),
                lambda x, y: x.rsqrt_(),
                lambda x, y: x.sigmoid(),
                lambda x, y: x.sigmoid_(),
                lambda x, y: x.logit(),
                lambda x, y: x.logit_(),
                lambda x, y: x.logit(1e-6),
                lambda x, y: x.logit_(1e-6),
                lambda x, y: x.sign(),
                lambda x, y: x.sign_(),
                lambda x, y: x.sgn(),
                lambda x, y: x.sgn_(),
                lambda x, y: x.sin(),
                lambda x, y: x.sin_(),
                lambda x, y: x.sinh(),
                lambda x, y: x.sinh_(),
                lambda x, y: x.sqrt(),
                lambda x, y: x.sqrt_(),
                lambda x, y: x.tan(),
                lambda x, y: x.tanh(),
                lambda x, y: x.trunc(),
                lambda x, y: x.trunc_(),
                _chunk_op,
                _unsqueeze_op_add,
                _unsqueeze_op_clone,
            ]
            for fn in fns:
                x_c = x.contiguous()
                y_c = y.contiguous()
                result_c = fn(x_c, y_c)
                result = fn(x, y)
                self.assertEqual(result, result_c)
                self.assertTrue(
                    result.is_contiguous(memory_format=memory_format),
                    "result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), memory_format))

            for fn in bias_fns:
                x_c = x.contiguous()
                b_c = bias.contiguous()
                result_c = fn(x_c, b_c)
                result = fn(x, bias)
                self.assertEqual(result, result_c)
                self.assertTrue(
                    result.is_contiguous(memory_format=memory_format),
                    "result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), memory_format))

            for fn in return_contig_fns:
                x_c = x.contiguous()
                y_c = y.contiguous()
                result_c = fn(x_c, y_c)
                result = fn(x, y)
                self.assertEqual(result, result_c)
                self.assertTrue(
                    result.is_contiguous(memory_format=torch.contiguous_format),
                    "result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(),
                                                                      torch.contiguous_format))

        _test_helper(
            torch.randn((4, 3, 8, 8), device=device).contiguous(memory_format=torch.channels_last),
            abs(torch.randn((4, 3, 8, 8), device=device)) + 1,
            torch.randn((1, 3, 1, 1), device=device).contiguous(memory_format=torch.channels_last),
            torch.channels_last)
        _test_helper(
            torch.randn((4, 3, 8, 8, 8), device=device).contiguous(memory_format=torch.channels_last_3d),
            abs(torch.randn((4, 3, 8, 8, 8), device=device)) + 1,
            torch.randn((1, 3, 1, 1, 1), device=device).contiguous(memory_format=torch.channels_last_3d),
            torch.channels_last_3d)

    def test_strides_propagation(self, device):
        def _test_helper(x, op, unary=False):
            def compare_strides(s1, s2, div):
                sdiv = [s // div for s in s1]
                self.assertEqual(sdiv, s2)

            dim = x.dim()
            # we produce memory dense outputs, so when input is strided on the last dimension
            # we need to divide by that dimension stride to compare input and result strides
            div = x.stride(-1)
            for p in permutations(range(dim)):
                xp = x.permute(p)
                if not unary:
                    y = torch.randn(xp.size(-1), device=x.device, dtype=x.dtype)
                    for inputs in ((xp, xp), (xp, y), (y, xp)):
                        res = op(*inputs)
                        compare_strides(xp.stride(), res.stride(), div)
                        self.assertEqual(xp.size(), res.size())
                        out = torch.empty(0, device=xp.device, dtype=res.dtype)
                        res = op(*inputs, out=out)
                        compare_strides(xp.stride(), res.stride(), div)
                        self.assertEqual(xp.size(), res.size())
                else:
                    res = op(xp)
                    compare_strides(xp.stride(), res.stride(), div)
                    self.assertEqual(xp.size(), res.size())
                    out = torch.empty(0, device=xp.device, dtype=res.dtype)
                    res = op(xp, out=out)
                    compare_strides(xp.stride(), res.stride(), div)
                    self.assertEqual(xp.size(), res.size())

        # torch.eq by default calls TensorIterator with defined output, torch.add with undefined
        binary_ops = (torch.eq, torch.add)
        unary_ops = (torch.exp,)
        # memory dense, sliced and ambiguous sliced (ambiguous dense loses permutation information)
        xs = (torch.randn(2, 3, 4, device=device),
              torch.randn(2, 3, 8, device=device)[:, :, ::2],
              torch.randn(1, 1, 4, 12, device=device)[:, :, :, ::2])
        for op in binary_ops:
            for x in xs:
                _test_helper(x, op)
        for op in unary_ops:
            for x in xs:
                _test_helper(x, op, unary=True)

    @skipMeta
    @dtypes(*torch.testing.get_all_dtypes())
    def test_dlpack_conversion(self, device, dtype):
        # DLpack does not explicitly support bool
        # It does it through uint8 type
        if dtype is torch.bool:
            return
        x = make_tensor((5,), device, dtype, low=-9, high=9)
        z = from_dlpack(to_dlpack(x))
        self.assertEqual(z, x)

    @onlyCUDA
    @unittest.skipIf(PYTORCH_CUDA_MEMCHECK, "is_pinned uses failure to detect pointer property")
    def test_pin_memory_from_constructor(self, device):
        def _get_like(t, **kwargs):
            return [
                torch.rand_like(t, **kwargs),
                torch.randn_like(t, **kwargs),
                torch.empty_like(t, **kwargs),
                torch.full_like(t, 4, **kwargs),
                torch.zeros_like(t, **kwargs),
                torch.ones_like(t, **kwargs),
            ]

        def _get_tensors(**kwargs):
            return [
                torch.tensor([10, 11], **kwargs),
                torch.randn(3, 5, **kwargs),
                torch.rand(3, **kwargs),
                # torch.randint(3, 5, **kwargs),  # unsupported
                torch.zeros(3, **kwargs),
                torch.randperm(3, **kwargs),
                torch.empty(6, **kwargs),
                torch.ones(6, **kwargs),
                torch.eye(6, **kwargs),
                torch.arange(3, 5, **kwargs)]

        pinned_tensors = _get_tensors(pin_memory=True) + _get_like(torch.empty(5, dtype=torch.float64), pin_memory=True)
        for x in pinned_tensors:
            self.assertTrue(x.is_pinned())

        tensors = _get_tensors() + _get_like(torch.empty(5, dtype=torch.float64, pin_memory=True))
        for x in tensors:
            self.assertFalse(x.is_pinned())

    def test_storage_device(self, device):
        x = torch.tensor([], device=device)
        self.assertEqual(x.dtype, x.storage().dtype)

    @deviceCountAtLeast(2)
    @onlyCUDA
    def test_storage_multigpu(self, devices):
        for device in devices:
            x = torch.tensor([], device=device)
            self.assertEqual(x.dtype, x.storage().dtype)

    @dtypesIfCUDA(torch.float, torch.double, torch.half)
    @dtypes(torch.float, torch.double)
    def test_multinomial(self, device, dtype):
        def make_prob_dist(shape, is_contiguous):
            if is_contiguous:
                if dtype == torch.half:
                    return torch.zeros(shape, device=device).uniform_().to(dtype=torch.half)
                return torch.zeros(shape, device=device, dtype=dtype).uniform_()
            elif len(shape) == 1:
                if dtype == torch.half:
                    return torch.zeros((shape + [5]), device=device).uniform_().to(dtype=torch.half)[:, 2]
                return torch.zeros((shape + [5]), device=device, dtype=dtype).uniform_()[:, 2]
            else:
                # num dim = 2
                new_shape = [2, shape[1], 7, 1, shape[0], 1, 10]
                if dtype == torch.half:
                    prob_dist = torch.zeros(new_shape, device=device).uniform_().to(dtype=torch.half)
                else:
                    prob_dist = torch.zeros(new_shape, device=device, dtype=dtype).uniform_()
                prob_dist = prob_dist.transpose(1, 4)
                prob_dist = prob_dist[1, :, 5, 0, :, 0, 4]
                assert not prob_dist.is_contiguous()  # sanity check
                return prob_dist

        for is_contiguous in (True, False):
            # with replacement
            n_row = 3
            for n_col in range(4, 5 + 1):
                prob_dist = make_prob_dist([n_row, n_col], is_contiguous)
                # indices that shouldn't be sampled (<0 means none)
                zero_prob_indices = torch.LongTensor(n_row).random_(-2, n_col).tolist()
                for i, j in enumerate(zero_prob_indices):
                    if j >= 0:
                        prob_dist[i, j] = 0
                n_sample = n_col * 3
                sample_indices = torch.multinomial(prob_dist, n_sample, True)
                self.assertEqual(prob_dist.dim(), 2)
                self.assertEqual(sample_indices.size(1), n_sample)
                for i in range(n_row):
                    zero_prob_idx = zero_prob_indices[i]
                    if zero_prob_idx < 0:
                        continue
                    for j in range(n_sample):
                        self.assertNotEqual(sample_indices[i, j], zero_prob_idx,
                                            msg="sampled an index with zero probability")

            # without replacement
            n_row = 3
            for n_col in range(2, 10 + 1, 2):
                prob_dist = make_prob_dist([n_row, n_col], is_contiguous)
                # indices that shouldn't be sampled (<0 means none)
                zero_prob_indices = torch.LongTensor(n_row).random_(-1, n_col).tolist()
                for i, j in enumerate(zero_prob_indices):
                    if j >= 0:
                        prob_dist[i, j] = 0
                n_sample = max(1, n_col - 2)
                sample_indices = torch.multinomial(prob_dist, n_sample, False)
                self.assertEqual(prob_dist.dim(), 2)
                self.assertEqual(sample_indices.size(1), n_sample)
                for i in range(n_row):
                    row_samples = {}
                    zero_prob_idx = zero_prob_indices[i]
                    for j in range(n_sample):
                        sample_idx = sample_indices[i, j]
                        if zero_prob_idx >= 0:
                            self.assertNotEqual(sample_idx, zero_prob_idx,
                                                msg="sampled an index with zero probability")
                        self.assertNotIn(sample_idx, row_samples, "sampled an index twice")
                        row_samples[sample_idx] = True

            # vector
            n_col = 4
            prob_dist = make_prob_dist([n_col], is_contiguous).fill_(1)
            zero_prob_idx = 1  # index that shouldn't be sampled
            prob_dist[zero_prob_idx] = 0
            n_sample = 20
            sample_indices = torch.multinomial(prob_dist, n_sample, True)
            for sample_index in sample_indices:
                self.assertNotEqual(sample_index, zero_prob_idx, msg="sampled an index with zero probability")
            s_dim = sample_indices.dim()
            self.assertEqual(sample_indices.dim(), 1, msg="wrong number of dimensions")
            self.assertEqual(prob_dist.dim(), 1, msg="wrong number of prob_dist dimensions")
            self.assertEqual(sample_indices.size(0), n_sample, msg="wrong number of samples")

        # CUDA misalignment issue (#46702)
        n_row, n_col = 2, 3
        prob_dist = make_prob_dist([n_row, n_col], True)
        n_sample = 1
        sample_indices = torch.multinomial(prob_dist, n_sample, True)
        self.assertEqual(sample_indices.dim(), 2, msg="wrong number of dimensions")
        self.assertEqual(sample_indices.size(1), n_sample, msg="wrong number of samples")

    @onlyCUDA
    @dtypes(torch.float, torch.double, torch.half)
    def test_multinomial_deterministic(self, device, dtype):
        gen = torch.Generator(device=device)

        trials = 5
        seed = 0
        prob_dist = torch.rand(10000, 1000, device=device, dtype=dtype)
        n_sample = 1

        for i in range(trials):
            gen.manual_seed(seed)
            samples_1 = torch.multinomial(prob_dist, n_sample, True, generator=gen)

            gen.manual_seed(seed)
            samples_2 = torch.multinomial(prob_dist, n_sample, True, generator=gen)

            self.assertEqual(samples_1, samples_2)
            self.assertEqual(samples_1.dim(), 2, msg="wrong number of dimensions")
            self.assertEqual(samples_1.size(1), n_sample, msg="wrong number of samples")

    @slowTest
    @dtypes(torch.float)
    def test_multinomial_rng_state_advance(self, device, dtype):
        corpus_size = 100000
        freqs = torch.ones(corpus_size, dtype=torch.float, device=device)
        n_sample = 100
        samples1 = torch.multinomial(freqs, n_sample, replacement=True)
        samples2 = torch.multinomial(freqs, n_sample, replacement=True)
        samples = torch.cat([samples1, samples2])
        # expect no more than 1 repeating elements generated in 2 attempts
        # the probability of at least element being repeated is surprisingly large, 18%
        self.assertLessEqual(2 * n_sample - samples.unique().size(0), 2)
        samples1 = torch.multinomial(freqs, n_sample, replacement=False)
        samples2 = torch.multinomial(freqs, n_sample, replacement=False)
        samples = torch.cat([samples1, samples2])
        # expect no more than 1 repeating elements generated in 2 attempts
        self.assertLessEqual(2 * n_sample - samples.unique().size(0), 1)

    def _test_memory_format_transformations(self, device, input_generator_fn, transformation_fn,
                                            memory_format, compare_data=True, default_is_preserve=False):

        assert(memory_format == torch.channels_last or memory_format == torch.channels_last_3d)

        # xc is a channels last tensor
        xc = input_generator_fn(device)
        # xc is not memory dense, but looks like channels last
        if memory_format == torch.channels_last:
            xc = xc[..., ::2, ::2]
        else:
            xc = xc[..., ::2, ::2, ::2]

        clone = transformation_fn(xc, memory_format=torch.preserve_format)
        self.assertFalse(clone.is_contiguous())
        self.assertTrue(clone.is_contiguous(memory_format=memory_format))
        self.assertFalse(xc.is_contiguous())
        self.assertFalse(xc.is_contiguous(memory_format=memory_format))
        if compare_data:
            self.assertEqual(xc, clone.to(xc))

        xc = input_generator_fn(device)
        clone = transformation_fn(xc, memory_format=torch.contiguous_format)
        self.assertTrue(clone.is_contiguous())
        self.assertFalse(clone.is_contiguous(memory_format=memory_format))
        if compare_data:
            self.assertEqual(xc, clone.to(xc))

        xc = input_generator_fn(device)
        clone = transformation_fn(xc)

        if default_is_preserve:
            self.assertFalse(clone.is_contiguous())
            self.assertTrue(clone.is_contiguous(memory_format=memory_format))
        else:
            self.assertTrue(clone.is_contiguous())
            self.assertFalse(clone.is_contiguous(memory_format=memory_format))
        if compare_data:
            self.assertEqual(xc, clone.to(xc))

        x = torch.randn((3, 4, 5, 6, 7, 8, 9), device=device)
        for _ in range(10):
            permutation = list(range(len(x.shape)))
            random.shuffle(permutation)
            x = x.permute(permutation)
            self.assertEqual(x.stride(), transformation_fn(x, memory_format=torch.preserve_format).stride())

    def test_memory_format_to(self, device):
        def get_generator(memory_format, shape):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format)
            return input_generator_fn

        def transformation_fn(tensor, **kwargs):
            return tensor.to(dtype=torch.float64, **kwargs)

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape in formats_shapes:
            self._test_memory_format_transformations(
                device, get_generator(mf, shape), transformation_fn, mf, default_is_preserve=True)

    def test_memory_format_type(self, device):
        def get_generator(memory_format, shape):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format)
            return input_generator_fn

        def transformation_fn(tensor, **kwargs):
            return tensor.to(torch.float64, **kwargs)

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape in formats_shapes:
            self._test_memory_format_transformations(
                device, get_generator(mf, shape), transformation_fn, mf, default_is_preserve=True)

    def test_memory_format_clone(self, device):
        def get_generator(memory_format, shape):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format)
            return input_generator_fn

        def transformation_fn(tensor, **kwargs):
            return tensor.clone(**kwargs)

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape in formats_shapes:
            self._test_memory_format_transformations(
                device, get_generator(mf, shape), transformation_fn, mf, True, default_is_preserve=True)

    def test_memory_format_factory_like_functions_preserve(self, device):
        def get_generator(memory_format, shape):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format)
            return input_generator_fn

        transformation_fns = [
            lambda t, **kwargs: torch.zeros_like(t, **kwargs),
            lambda t, **kwargs: torch.ones_like(t, **kwargs),
            lambda t, **kwargs: torch.randint_like(t, 10, 100, **kwargs),
            lambda t, **kwargs: torch.randint_like(t, 100, **kwargs),
            lambda t, **kwargs: torch.randn_like(t, **kwargs),
            lambda t, **kwargs: torch.rand_like(t, **kwargs),
            lambda t, **kwargs: torch.full_like(t, 7, **kwargs),
            lambda t, **kwargs: torch.empty_like(t, **kwargs)]

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape, in formats_shapes:
            for transformation_fn in transformation_fns:
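                # each *_like factory is expected to preserve the channels-last
                # layout of its input here (memory_format is not overridden)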
                self._test_memory_format_transformations(
                    device, get_generator(mf, shape), transformation_fn, mf,
                    compare_data=False, default_is_preserve=True)

    def test_memory_format_type_shortcuts(self, device):
        def get_generator(memory_format, shape, dtype):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=dtype).clamp(0, 1) \
                    .round().contiguous(memory_format=memory_format)
            return input_generator_fn

        def get_fn(fn_name):
            def transformation_fn(tensor, **kwargs):
                fn = getattr(tensor, fn_name)
                return fn(**kwargs)
            return transformation_fn

        shortcuts = ['byte', 'char', 'double', 'bool', 'half', 'int', 'long', 'short']
        if device == 'cpu':
            shortcuts += ['bfloat16']

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape in formats_shapes:
            for fn_name in shortcuts:
                self._test_memory_format_transformations(
                    device, get_generator(mf, shape, torch.float32), get_fn(fn_name), mf, default_is_preserve=True)

        # Test 'float' separately to avoid float->float no-op.
        for mf, shape in formats_shapes:
            self._test_memory_format_transformations(
                device, get_generator(mf, shape, torch.float64), get_fn('float'), mf, default_is_preserve=True)

    @onlyCUDA
    def test_memory_format_cpu_and_cuda_ops(self, device):
        def get_generator(memory_format, shape):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format)
            return input_generator_fn

        def transformation_cpu_fn(tensor, **kwargs):
            return tensor.cpu(**kwargs)

        def transformation_cuda_fn(tensor, **kwargs):
            return tensor.cuda(**kwargs)

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape in formats_shapes:
            self._test_memory_format_transformations(
                'cuda', get_generator(mf, shape), transformation_cpu_fn, mf, default_is_preserve=True)
            self._test_memory_format_transformations(
                'cpu', get_generator(mf, shape), transformation_cuda_fn, mf, default_is_preserve=True)

    @dtypes(torch.complex64, torch.complex128)
    def test_complex_unsupported(self, device, dtype):
        t = torch.tensor((1 + 1j), device=device, dtype=dtype)
        # Note: this is consistent with NumPy
        with self.assertRaises(RuntimeError):
            torch.floor(t)
        with self.assertRaises(RuntimeError):
            torch.ceil(t)
        with self.assertRaises(RuntimeError):
            torch.trunc(t)

        # Tests min and max variants with complex inputs
        # Note: whether PyTorch should support min and max on complex
        # tensors is an open question.
        # See https://github.com/pytorch/pytorch/issues/36374
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.min(t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            t.min()
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.min(t, dim=0)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.min(t, t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.min(t, t, out=t)

        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.max(t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            t.max()
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.max(t, dim=0)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.max(t, t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.max(t, t, out=t)

        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.amin(t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            t.amin()
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.amin(t, dim=0)

        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.amax(t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            t.amax()
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.amax(t, dim=0)

        # Tests _aminmax() variants with complex inputs,
        # which are currently not supported due to min & max being unsupported
        # for complex inputs, as per https://github.com/pytorch/pytorch/issues/36374
        # Test with a single-element tensor t, as well as a multi-element tensor x
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            min_val, max_val = torch._aminmax(t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            min_val = torch._aminmax(t, dim=0)[0]
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            max_val = torch._aminmax(t, dim=0)[1]

        # Test _aminmax() with a multi-element tensor
        x = torch.tensor([(1 + 1j), (2 + 3j)], device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            min_val, max_val = torch._aminmax(x)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            min_val = torch._aminmax(x, dim=0)[0]
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            max_val = torch._aminmax(x, dim=0)[1]

        # Tests clamp variants with complex inputs
        # Note: whether PyTorch should support clamp on complex
        # tensors is an open question.
        # See https://github.com/pytorch/pytorch/issues/33568
        min_val = 1 + 1j
        max_val = 4 + 4j
        out = torch.empty((0,), device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, min=min_val)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, max=max_val)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, min_val, max_val)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, min=min_val, out=out)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, max=max_val, out=out)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, min_val, max_val, out=out)

    def test_pickle_gradscaler(self, device):
        # This test is not in test_cuda.py because it should pass in 3 cases:
        #  1. cuda is not available.
        #  2. cuda is available but device is not cuda.
        #  3. cuda is available and device is cuda.
        # In case 1, a and b disable themselves on construction and shouldn't try to pickle workhorse attributes.
        # In case 2, a and b are enabled.  Workhorse attributes participate in pickling, but none are lazy-inited
        # to cuda Tensors, because I don't want to do cuda things if device is not cuda.
        # In case 3, a and b are enabled and we may also try lazy-initing _scale to a cuda tensor.
        device = torch.device(device)
        try_lazy_inits = (True, False) if device.type == "cuda" else (False,)
        for lazy_init_scale in try_lazy_inits:
            a = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
            self.assertTrue(not a.is_enabled() if torch.cuda.amp.common.amp_definitely_not_available()
                            else a.is_enabled())
            if lazy_init_scale:
                # Dummy a.scale() call lazy-inits a._scale Tensor.
                a.scale(torch.tensor([4.0], dtype=torch.float32, device=device))
                self.assertTrue(isinstance(a._scale, torch.cuda.FloatTensor))
            # The following three lines should work whether or not cuda is available.
            serialized = pickle.dumps(a)
            b = pickle.loads(serialized)
            self.assertEqual(b.is_enabled(), a.is_enabled())
            if a.is_enabled():
                self.assertEqual(b.get_scale(), 3.)
                self.assertEqual(b.get_growth_factor(), 4.)
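                # the remaining scaler state should survive the pickle round-trip as well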
                self.assertEqual(b.get_backoff_factor(), .5)
                self.assertEqual(b.get_growth_interval(), 2)
                self.assertEqual(b._init_growth_tracker, 0)
                # supplies a dummy key to test the defaultdict's default_factory
                self.assertEqual(b._per_optimizer_states["fdsa"],
                                 torch.cuda.amp.grad_scaler._refresh_per_optimizer_state())
                if lazy_init_scale:
                    self.assertEqual(b.scale(torch.tensor([4.0], dtype=torch.float32, device=device)), 12.0)

    def test_multinomial_invalid(self, device):
        def test(probs):
            with self.assertRaisesRegex(RuntimeError,
                                        'probability tensor contains either `inf`, `nan` or element < 0'):
                torch.multinomial(probs.to(device), 2)
                torch.cuda.synchronize()

        test(torch.tensor([1., -1., 1.]))
        test(torch.tensor([1., inf, 1.]))
        test(torch.tensor([1., -inf, 1.]))
        test(torch.tensor([1., 1., nan]))

    def test_multinomial_invalid_distribution(self, device):
        def test(probs, replacement):
            with self.assertRaisesRegex(RuntimeError,
                                        r"invalid multinomial distribution \(sum of probabilities <= 0\)"):
                torch.multinomial(probs, 2, replacement)
                torch.cuda.synchronize()

        x = torch.zeros(3, device=device)
        y = torch.zeros(3, 3, device=device)
        z = torch.zeros(3, 3, device=device)
        z[1, :] = 1

        test(x, False)
        test(y, False)
        test(z, False)

        # Verify only on CPU, as replacement=True on CUDA
        # triggers a device-side assert instead.
        if self.device_type == 'cpu':
            test(x, True)
            test(y, True)
            test(z, True)

    def _test_multinomial_empty(self, device, replacement, num_samples):
        probs = torch.ones(0, 3, device=device)
        expected = torch.empty(0, num_samples, dtype=torch.int64)
        out = torch.multinomial(probs, num_samples=num_samples, replacement=replacement)
        self.assertEqual(out, expected)

    def test_multinomial_empty_w_replacement(self, device):
        self._test_multinomial_empty(device, True, 1)
        self._test_multinomial_empty(device, True, 2)

    def test_multinomial_empty_wo_replacement(self, device):
        self._test_multinomial_empty(device, False, 1)
        self._test_multinomial_empty(device, False, 2)

    def _generate_input(self, shape, dtype, device, with_extremal):
        if shape == ():
            x = torch.tensor((), dtype=dtype, device=device)
        else:
            if dtype.is_floating_point or dtype.is_complex:
                # work around torch.randn not being implemented for bfloat16
                if dtype == torch.bfloat16:
                    x = torch.randn(*shape, device=device) * random.randint(30, 100)
                    x = x.to(torch.bfloat16)
                else:
                    x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
                x[torch.randn(*shape) > 0.5] = 0
                if with_extremal and dtype.is_floating_point:
                    # Use extremal values
                    x[torch.randn(*shape) > 0.5] = float('nan')
                    x[torch.randn(*shape) > 0.5] = float('inf')
                    x[torch.randn(*shape) > 0.5] = float('-inf')
                elif with_extremal and dtype.is_complex:
                    x[torch.randn(*shape) > 0.5] = complex('nan')
                    x[torch.randn(*shape) > 0.5] = complex('inf')
                    x[torch.randn(*shape) > 0.5] = complex('-inf')
            elif dtype == torch.bool:
                x = torch.zeros(shape, dtype=dtype, device=device)
                x[torch.randn(*shape) > 0.5] = True
            else:
                x = torch.randint(15, 100, shape, dtype=dtype, device=device)

        return x

    def _test_where_scalar_template(self, device, dtype, exec_fn):
        for with_extremal in [True, False]:
            for ndims in range(0, 4):
                shape = self._rand_shape(ndims, min_size=5, max_size=10)
                for n in range(ndims + 1):
                    for c in combinations(list(range(ndims)), n):
                        for scalar_type in [int, float, complex]:
                            if dtype.is_complex:
                                condition = self._generate_input(shape, dtype, device, with_extremal).abs() > 0.5
                            else:
                                condition = self._generate_input(shape, dtype, device, with_extremal) > 0.5

                            x = self._generate_input(shape, dtype, device, with_extremal)

                            if not dtype.is_complex and scalar_type == complex:
                                continue

                            scalar_1 = scalar_type(random.random())

                            exec_fn(scalar_type, dtype, condition, x, scalar_1)

    # For the current implementation,
    # below are the valid `TensorDtype` and `ScalarType` combinations.
    def _where_valid_scalar_tensor_combination(self, scalar_type, dtype):
        if (scalar_type == int and dtype == torch.long):
            return True
        elif (scalar_type == float and dtype == torch.double):
            return True
        elif (scalar_type == complex and dtype == torch.complex128):
            return True
        return False

    @onlyOnCPUAndCUDA
    @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes() +
              torch.testing.get_all_complex_dtypes()))
    def test_where_scalar_invalid_combination_raises(self, device, dtype):
        def checkRaises(scalar_type, dtype, condition, x, scalar_1):
            if not self._where_valid_scalar_tensor_combination(scalar_type, dtype):
                # Note: This should fail once `where` supports type promotion.
                with self.assertRaisesRegex(RuntimeError, "expected scalar type"):
                    torch.where(condition, x, scalar_1)

        self._test_where_scalar_template(device, dtype, checkRaises)

    @skipCUDAVersionIn([(11, 2)])  # test fails for 11.2, see https://github.com/pytorch/pytorch/issues/51980
    @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes() +
              torch.testing.get_all_complex_dtypes()))
    def test_where_scalar_valid_combination(self, device, dtype):
        def checkResult(scalar_type, dtype, condition, x, scalar_1):
            if self._where_valid_scalar_tensor_combination(scalar_type, dtype):
                def x_like(scalar, without_dtype=False):
                    return torch.tensor(scalar, dtype=dtype, device=device).expand_as(x)

                # X = Tensor, Y = Scalar
                scalar_out = torch.where(condition, x, scalar_1)
                tensor_out = torch.where(condition, x, x_like(scalar_1))
                self.assertEqual(scalar_out, tensor_out)

                # X = Scalar, Y = Tensor
                scalar_out = torch.where(condition, scalar_1, x)
                tensor_out = torch.where(condition, x_like(scalar_1), x)
                self.assertEqual(scalar_out, tensor_out)

        self._test_where_scalar_template(device, dtype, checkResult)

    # Restricted to CPU and CUDA: on XLA the expected RuntimeError is not raised.
    @onlyOnCPUAndCUDA
    def test_where_scalar_scalar(self, device):
        # Scalar-Scalar Version
        height = 5
        width = 5
        default_dtype = torch.get_default_dtype()
        for test_default_dtype in [torch.float, torch.double]:
            torch.set_default_dtype(test_default_dtype)
            for scalar_type_1 in [int, float, complex]:
                for scalar_type_2 in [int, float, complex]:
                    x1 = scalar_type_1(random.random() * random.randint(10, 20))
                    x2 = scalar_type_2(random.random() * random.randint(20, 30))
                    condition = torch.randn(height, width, device=device) > 0.5
                    if scalar_type_1 != scalar_type_2:
                        self.assertRaisesRegex(RuntimeError,
                                               "expected scalar type",
                                               lambda: torch.where(condition, x1, x2))
                    else:
                        def get_dtype(scalar_type):
                            complex_dtype = torch.complex64 if torch.float == torch.get_default_dtype() else torch.complex128
                            type_map = {int: torch.long, float: torch.get_default_dtype(),
                                        complex: complex_dtype}
                            return type_map[scalar_type]
                        expected = torch.zeros((height, width), dtype=get_dtype(scalar_type_1))
                        expected[condition] = x1
                        expected[~condition] = x2
                        result = torch.where(condition, x1, x2)
                        self.assertEqual(expected, result)

        # Reset the original dtype
        torch.set_default_dtype(default_dtype)


# Tests that compare a device's computation with the (gold-standard) CPU's.
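# (Added note: classes like TestDevicePrecision below are device-generic; they
# are not collected directly but are expanded into per-device test classes by
# the instantiate_device_type_tests(...) calls at the bottom of this file,
# which is why every test method takes a `device` argument.)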
class TestDevicePrecision(TestCase):
    exact_dtype = True

    @onlyCUDA
    def test_index_add_bfloat16(self, device):
        inp_tensor = torch.randn(5, 3, device='cpu').bfloat16()
        t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.bfloat16, device='cpu')
        index = torch.tensor([0, 4, 2], device='cpu')
        out_cpu = inp_tensor.index_add(0, index, t)

        inp_tensor = inp_tensor.to(device=device)
        t = t.to(device=device)
        index = index.to(device=device)
        out_gpu = inp_tensor.index_add(0, index, t)

        self.assertEqual(out_cpu, out_gpu, atol=1e-2, rtol=0)

    def test_device_serialization(self, device):
        x = torch.randn(4, 4, device=device)

        with tempfile.NamedTemporaryFile() as f:
            torch.save(x, f)
            f.seek(0)
            x_copy = torch.load(f)

        self.assertEqual(x_copy, x)
        self.assertIs(type(x_copy), type(x))
        self.assertEqual(x_copy.device, x.device)

    @deviceCountAtLeast(2)
    def test_multidevice_serialization(self, devices):
        x = [torch.randn(4, 4, device=devices[0]),
             torch.randn(4, 4, device=devices[1])]

        with tempfile.NamedTemporaryFile() as f:
            torch.save(x, f)
            f.seek(0)
            x_copy = torch.load(f)

        for original, cp in zip(x, x_copy):
            self.assertEqual(cp, original)
            self.assertIs(type(cp), type(original))
            self.assertEqual(cp.device, original.device)

    @deviceCountAtLeast(1)
    def test_copy_noncontig(self, devices):
        def do_test(d0, d1):
            x = torch.tensor([1.5, 2.5, 3.5, 4.5, 5.5, 6.5], device=d0)
            y = torch.tensor([0, 0, 0, 0, 0, 0], device=d1)
            self.assertNotEqual(x.dtype, y.dtype)

            y[::2].copy_(x[::2])
            self.assertEqual(y, [1, 0, 3, 0, 5, 0])

        do_test('cpu', devices[0])
        do_test(devices[0], 'cpu')

        if len(devices) > 1:
            do_test(devices[0], devices[1])

    @deviceCountAtLeast(2)
    def test_type_conversions_same_device(self, devices):
        x = torch.randn(5, 5, device=devices[1])
        self.assertEqual(x.int().device, torch.device(devices[1]))
        self.assertEqual(x.type(torch.int).device, torch.device(devices[1]))
        self.assertEqual(x.to(torch.int).device, torch.device(devices[1]))

    @dtypesIfCUDA(torch.half, torch.float, torch.double,
                  torch.int8, torch.short, torch.int, torch.long,
                  torch.uint8)
    @dtypes(torch.float, torch.double,
            torch.int8, torch.short, torch.int, torch.long,
            torch.uint8)
    def test_from_sequence(self, device, dtype):
        seq = [list(range(i * 4, i * 4 + 4)) for i in range(5)]
        reference = torch.arange(0, 20).resize_(5, 4)
        self.assertEqual(torch.tensor(seq, dtype=dtype, device=device), reference, exact_dtype=False)

    @deviceCountAtLeast(1)
    def test_advancedindex_mixed_cpu_devices(self, devices) -> None:
        def test(x: torch.Tensor, ia: torch.Tensor, ib: torch.Tensor) -> None:
            # test getitem
            self.assertEqual(x[:, ia, None, ib, 0].cpu(),
                             x.cpu()[:, ia.cpu(), None, ib.cpu(), 0])
            self.assertEqual(x[ia], x.cpu()[ia.cpu()])
            # test setitem
            x_clone1 = x.clone()
            x_clone2 = x.clone()
            first_shape = x[:, ia, None, ib, 0].shape
            second_shape = x[ia].shape
            x_clone1[:, ia, None, ib, 0] = torch.randn(first_shape).to(x_clone1)
            x_clone2[ia] = torch.randn(second_shape).to(x_clone2)

        cpu = torch.device('cpu')
        for device in devices:
            # Index cpu tensor with device tensor
            x = torch.randn(3, 4, 4, 4, 3)
            ia = torch.tensor([0, 2, 1]).to(device)
            ib = torch.tensor([0, 2, 1]).to(device)
            test(x, ia, ib)

            # Index device tensor with cpu tensor
            x = x.to(device)
            ia = ia.to(cpu)
            ib = ib.to(cpu)
            test(x, ia, ib)

            # Index cpu tensor with mixed cpu, device tensors
            x = x.to(cpu)
            ia = ia.to(cpu)
            ib = ib.to(device)
            test(x, ia, ib)

            # Index device tensor with mixed cpu, device tensors
            x = x.to(device)
            ia = ia.to(cpu)
            ib = ib.to(device)
            test(x, ia, ib)

            if len(devices) > 1:
                other_device = devices[0]
                if device == devices[0]:
                    other_device = devices[1]
                # Index device tensor with mixed cpu, device tensors on different devices
                x = x.to(device)
                ia = ia.to(cpu)
                ib = ib.to(other_device)
                test(x, ia, ib)

    def test_copy_broadcast(self, device) -> None:
        x = torch.randn(10, 5)
        y = torch.randn(5, device=device)
        x.copy_(y)
        self.assertEqual(x[3], y)

        x = torch.randn(10, 5, device=device)
        y = torch.randn(5)
        x.copy_(y)
        self.assertEqual(x[3], y)

    @dtypes(torch.int64, torch.float32, torch.float64)
    def test_clamp(self, device, dtype):
        test_args = [
            *product(
                [(100, 50), (10, 64), (97,)],  # shape
                (True, False),  # non-contiguous
            )
        ]

        for shape, noncontig in test_args:
            x = make_tensor(shape, device=device, dtype=dtype, noncontiguous=noncontig)
            ub = make_tensor(shape, device=device, dtype=dtype, noncontiguous=noncontig)
            lb = make_tensor(shape, device=device, dtype=dtype, noncontiguous=noncontig)

            expect = x.max(lb).min(ub)
            actual = x.clamp(lb, ub)
            self.assertEqual(expect, actual)

            expect = np.clip(x.cpu().numpy(), lb.cpu().numpy(), ub.cpu().numpy())
            self.assertEqual(expect, actual)

            expect = x.max(lb)
            actual = x.clamp(min=lb)
            self.assertEqual(expect, actual)

            expect = x.min(ub)
            actual = x.clamp(max=ub)
            self.assertEqual(expect, actual)

            # Test broadcasting min & max
            expect = x.max(lb[0]).min(ub[..., :1])
            actual = x.clamp(lb[0], ub[..., :1])
            self.assertEqual(expect, actual)

            # Test broadcasting x
            expect = x[..., :1].max(lb).min(ub)
            actual = x[..., :1].clamp(lb, ub)
            self.assertEqual(expect, actual)


# we implemented custom deallocation for subclasses, so it behooves
# us to make sure all of these bits work.  We'll use __del__ to
# track if objects die or not
class Tracker:
    def __init__(self, marker):
        self.marker = marker

    @staticmethod
    def make():
        marker = [False]
        return marker, Tracker(marker)

    def __del__(self):
        self.marker[0] = True


@contextlib.contextmanager
def disable_gc():
    if gc.isenabled():
        try:
            gc.disable()
            yield
        finally:
            gc.enable()
    else:
        yield


class TestTorch(AbstractTestCases._TestTorchMixin):
    exact_dtype = True

    def test_tensor_ctor_scalar(self):
        x = torch.Tensor(torch.tensor(1.0))
        self.assertEqual(x, torch.tensor(1.0))

    def test_deepcopy_gradient(self):
        from copy import deepcopy
        a = torch.zeros(10)
        a.grad = torch.ones(10)
        self.assertEqual(a.grad, deepcopy(a).grad)
        s = torch.zeros(10).to_sparse()
        s.grad = torch.ones(10).to_sparse()
        self.assertEqual(s.grad, deepcopy(s).grad)

        # ensure sharing is not broken
        c = deepcopy([a, a.grad])
        self.assertTrue(c[0].grad is c[1])

    def test_tensor_base_init(self):
        # Direct construction not OK
        self.assertRaises(RuntimeError, lambda: torch._C._TensorBase())

        # But construction of subclass is OK
        class T(torch._C._TensorBase):
            pass

        T()

    def test_tensor_base_new(self):
        # OK to call super().__new__, see
        # https://github.com/pytorch/pytorch/issues/57421
        class TestTensor(torch._C._TensorBase):
            @staticmethod
            def __new__(cls, x, *args, **kwargs):
                return super().__new__(cls, x, *args, **kwargs)

        x = torch.ones(5)
        test_tensor = TestTensor(x)

    def test_pyobj_preserved(self):
        x = torch.empty(2)
        x.foo = 2  # put something on __dict__
        y = torch.empty(2)
        y.grad = x
        del x  # x is dead in Python
        self.assertEqual(y.grad.foo, 2)
        z = y.grad  # it's live
        del z  # it's dead again
        self.assertEqual(y.grad.foo, 2)

    def test_subclass_preserved(self):
        class MyTensor(torch._C._TensorBase):
            pass

        x = MyTensor(torch.empty(2))
        y = torch.empty(2)
        y.grad = x
        del x  # x is dead in Python
        self.assertEqual(type(y.grad), MyTensor)
        z = y.grad  # it's live
        del z  # it's dead again
        self.assertEqual(type(y.grad), MyTensor)

    def test_tensor_slot_dealloc(self):
        class SlotTensor1(torch._C._TensorBase):
            __slots__ = ['slot1']

        class SlotTensor2(SlotTensor1):
            __slots__ = ['slot2']

        m1, t1 = Tracker.make()
        m2, t2 = Tracker.make()
        slot_tensor = SlotTensor2(torch.empty(2))
        slot_tensor.slot1 = t1
        slot_tensor.slot2 = t2
        del t1
        del t2
        self.assertFalse(m1[0])
        self.assertFalse(m2[0])
        del slot_tensor
        self.assertTrue(m1[0])
        self.assertTrue(m2[0])

    def test_tensor_dict_dealloc(self):
        m, t = Tracker.make()
        x = torch.empty(2)
        x.arf = t
        del t
        self.assertFalse(m[0])
        del x
        self.assertTrue(m[0])

    def test_tensor_finalizer_dealloc(self):
        m = [False]

        class FinalizerTensor(torch._C._TensorBase):
            def __del__(self):
                m[0] = True

        fin_tensor = FinalizerTensor(torch.empty(2))
        self.assertFalse(m[0])
        del fin_tensor
        self.assertTrue(m[0])

    def test_tensor_weakref_dealloc(self):
        x = torch.empty(2)
        m = [False]

        def cb(r):
            m[0] = True

        wref = weakref.ref(x, cb)
        del x
        self.assertTrue(m[0])
        self.assertEqual(wref(), None)

    def test_tensor_cycle_via_dict(self):
        m1, t1 = Tracker.make()
        x = torch.empty(2)
        x._tracker = t1
        del t1

        m2, t2 = Tracker.make()
        y = torch.empty(2)
        y._tracker = t2
        del t2

        x._loop = y
        y._loop = x

        # C++ reference should keep the cycle live!
        # This exercises THPVariable_subtype_traverse
        # NB: Because z.grad is a reference done entirely in C++, cycles
        # involving it directly are NOT broken by Python GC; you've
        # set up a good old C++ reference cycle which we cannot safely
        # break (because C++ references are allowed to be accessed
        # multithreaded-ly) (TODO: except maybe if you can prove that
        # only Python has access to the C++ object, in which case you can
        # also prove that no multithreaded access occurs)
        z = torch.empty(2)
        z.grad = x

        del x
        del y

        gc.collect()
        self.assertFalse(m1[0])
        self.assertFalse(m2[0])

        with disable_gc():
            del z
            self.assertFalse(m1[0])
            self.assertFalse(m2[0])

        gc.collect()
        self.assertTrue(m1[0])
        self.assertTrue(m2[0])

    def test_tensor_cycle_via_slots(self):
        m1 = [False]
        m2 = [False]

        class SlotTensor1(torch._C._TensorBase):
            __slots__ = ['slot1']

            def __del__(self):
                m1[0] = True

        class SlotTensor2(SlotTensor1):
            __slots__ = ['slot2']

            def __del__(self):
                m2[0] = True

        x = SlotTensor1(torch.empty(2))
        y = SlotTensor2(torch.empty(2))

        x.slot1 = y
        y.slot2 = x

        del x
        with disable_gc():
            del y
            self.assertFalse(m1[0])
            self.assertFalse(m2[0])

        gc.collect()
        self.assertTrue(m1[0])
        self.assertTrue(m2[0])

    def test_backward_hooks_traverse(self):
        m1, t1 = Tracker.make()
        m2, t2 = Tracker.make()
        x = torch.empty(2, requires_grad=True)
        x._tracker = t1
        y = torch.empty(2, requires_grad=True)
        y._tracker = t2
        del t1
        del t2

        # this hits a special setter, it's not just a __dict__ entry
        x._backward_hooks = y
        y._backward_hooks = x

        del x
        with disable_gc():
            del y
            self.assertFalse(m1[0])
            self.assertFalse(m2[0])

        gc.collect()

        self.assertTrue(m1[0])
        self.assertTrue(m2[0])

    def test_dead_weak_ref(self):
        x = torch.empty(2)
        w_x = weakref.ref(x)
        y = torch.empty(2)
        y.grad = x
        del x

        x = w_x()
        # Ideally, x would keep the tensor live.  But CPython doesn't
        # provide enough hooks to do this.  So it will go dead and x
        # will transmute into an undefined tensor.  Not great, but the
        # best we can do.
        del y

        self.assertRaises(RuntimeError, lambda: x.sigmoid())

    def test_resurrected_weak_ref(self):
        x = torch.empty(2)
        w_x = weakref.ref(x)
        y = torch.empty(2)
        y.grad = x
        del x

        x = w_x()
        # Use this to manually fix weak references after dereferencing them
        x._fix_weakref()
        del y
        x.sigmoid()

    @torch.inference_mode()
    def test_bmm_multithreaded(self):
        device = 'cpu'
        num_threads = torch.get_num_threads()

        torch.set_num_threads(4)
        batch_sizes = [1, 10]
        M, N, O = 23, 8, 12
        dtype = torch.float32
        numpy_dtype = dtype

        def invert_perm(p):
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])

        def generate_inputs(num_batches):
            # transposed tensors
            for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
                b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)
                b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                yield b1, b2
            # broadcasting tensors
            for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1)
                shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1)
                b1 = make_tensor(shape1, device, dtype, low=-1, high=1).expand(num_batches, M, N)
                b2 = make_tensor(shape2, device, dtype, low=-1, high=1).expand(num_batches, N, O)
                yield b1, b2
            # zero-sized tensors
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = torch.randn(shape1, dtype=dtype, device=device)
                b2 = torch.randn(shape2, dtype=dtype, device=device)
                yield b1, b2

        try:
            for num_batches in batch_sizes:
                for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches),
                                                         itertools.permutations((0, 1, 2))):
                    res1 = torch.bmm(b1, b2)
                    res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \
                        .permute(perm3).contiguous().permute(invert_perm(perm3))
                    torch.bmm(b1, b2, out=res2)
                    expect = torch.from_numpy(
                        b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                    self.assertEqual(expect, res1)
                    self.assertEqual(expect, res2)
        finally:
            torch.set_num_threads(num_threads)


# TODO: these empty classes are temporarily instantiated for XLA compatibility
#   once XLA updates their test suite it should be removed
class TestViewOps(TestCase):
    pass

class TestTensorDeviceOps(TestCase):
    pass

# Generates tests
# Note: test generation must be done at file scope, not within main, or
# pytest will fail.
add_neg_dim_tests()
instantiate_device_type_tests(TestViewOps, globals())
instantiate_device_type_tests(TestVitalSignsCuda, globals())
instantiate_device_type_tests(TestTensorDeviceOps, globals())
instantiate_device_type_tests(TestTorchDeviceType, globals())
instantiate_device_type_tests(TestDevicePrecision, globals(), except_for='cpu')

if __name__ == '__main__':
    run_tests()
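# (A rough sketch, not the real implementation, of what the
# instantiate_device_type_tests(...) calls above do: each device-generic class
# is specialized into per-device classes, e.g. TestViewOpsCPU / TestViewOpsCUDA,
# placed into the given scope, roughly like:
#
#     for device_type in ('cpu', 'cuda'):
#         name = TestViewOps.__name__ + device_type.upper()
#         globals()[name] = type(name, (TestViewOps,), {'device_type': device_type})
# )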
# -*- coding: utf-8 -*-
import torch
import numpy as np

import contextlib
import gc
import io
import inspect
import itertools
import math
import random
import re
import copy
import os
import tempfile
import unittest
import warnings
import types
import pickle
import textwrap
import subprocess
import weakref
import sys
from torch.utils.dlpack import from_dlpack, to_dlpack
from torch._six import inf, nan, string_classes
from itertools import product, combinations, permutations
from functools import partial
from torch import multiprocessing as mp
from torch.testing._internal.common_utils import (
    TestCase, TEST_WITH_ROCM, run_tests,
    IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN,
    do_test_dtypes, IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, slowTest,
    skipCUDAMemoryLeakCheckIf, BytesIOContext, noarchTest,
    skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
    wrapDeterministicFlagAPITest, DeterministicGuard, make_tensor)
from multiprocessing.reduction import ForkingPickler
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests,
    skipCUDAVersionIn,
    onlyCUDA, onlyCPU,
    dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast,
    skipMeta,
    PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyOnCPUAndCUDA,
    expectedAlertNondeterministic)
from typing import Dict, List, Tuple
import torch.backends.quantized
import torch.testing._internal.data
from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32

# Protects against includes accidentally setting the default dtype
assert torch.get_default_dtype() is torch.float32

# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()

# Wrap base test class into a class to hide it from testing
# See https://stackoverflow.com/a/25695512
class AbstractTestCases:
    # This is intentionally prefixed by an underscore. Otherwise pytest will try to
    # run its methods as test cases.
    class _TestTorchMixin(TestCase):
        def _make_tensors(self, shape, val_range=(-100, 100), use_floating=True, use_integral=True,
                          use_complex=False) -> Dict[str, List[torch.Tensor]]:
            float_types = [torch.double,
                           torch.float]
            int_types = [torch.int64,
                         torch.int32,
                         torch.int16]
            complex_types = [torch.complex64,
                             torch.complex128]

            def make_contiguous(shape, dtype) -> torch.Tensor:
                if dtype in float_types:
                    val = torch.randn(shape, dtype=dtype)
                    val = val * ((val_range[1] - val_range[0]) / (math.pi * 2.0))
                    val = val + ((val_range[1] - val_range[0]) / 2.0)
                    val = torch.clamp(val, min=val_range[0], max=val_range[1])
                    return val
                result = torch.zeros(shape, dtype=dtype)
                result.apply_(lambda x: random.randint(val_range[0], val_range[1]))
                return result

            def make_non_contiguous(shape, dtype) -> torch.Tensor:
                contig = make_contiguous(shape, dtype)
                non_contig = torch.empty(shape + (2, 2), dtype=dtype)[..., 0]
                non_contig = non_contig.select(-1, -1)
                non_contig.copy_(contig)
                self.assertFalse(non_contig.is_contiguous())
                return non_contig

            def make_contiguous_slice(size, dtype) -> torch.Tensor:
                contig = make_contiguous((1, size), dtype)
                non_contig = contig[:1, 1:size - 1]
                self.assertTrue(non_contig.is_contiguous())
                return contig

            types = []
            if use_floating:
                types += float_types
            if use_integral:
                types += int_types
            if use_complex:
                types += complex_types
            tensors: Dict[str, List[torch.Tensor]] = {"cont": [], "noncont": [], "slice": []}
            for dtype in types:
                tensors["cont"].append(make_contiguous(shape, dtype))
                tensors["noncont"].append(make_non_contiguous(shape, dtype))
                tensors["slice"].append(make_contiguous_slice(sum(list(shape)), dtype))

            return tensors

        def test_dir(self):
            dir(torch)

        @wrapDeterministicFlagAPITest
        def test_deterministic_flag(self):
            for deterministic in [True, False]:
                torch.use_deterministic_algorithms(deterministic)
                self.assertEqual(deterministic, torch.are_deterministic_algorithms_enabled())

            with self.assertRaisesRegex(RuntimeError, r"use_deterministic_algorithms expects a bool, but got int"):
                torch.use_deterministic_algorithms(1)

        def test_type_conversion_via_dtype_name(self):
            x = torch.tensor([1])
            self.assertEqual(x.byte().dtype, torch.uint8)
            self.assertEqual(x.bool().dtype, torch.bool)
            self.assertEqual(x.char().dtype, torch.int8)
            self.assertEqual(x.double().dtype, torch.float64)
            self.assertEqual(x.float().dtype, torch.float32)
            self.assertEqual(x.half().dtype, torch.float16)
            self.assertEqual(x.int().dtype, torch.int32)
            self.assertEqual(x.bfloat16().dtype, torch.bfloat16)
            cfloat = x.cfloat()
            self.assertEqual(cfloat.dtype, torch.complex64)
            self.assertEqual(cfloat.real, x.float())
            self.assertEqual(cfloat.imag, torch.zeros_like(cfloat.imag))
            cdouble = x.cdouble()
            self.assertEqual(cdouble.dtype, torch.complex128)
            self.assertEqual(cdouble.real, x.double())
            self.assertEqual(cdouble.imag, torch.zeros_like(cdouble.imag))

        def test_doc_template(self) -> None:
            from torch._torch_docs import __file__ as doc_file
            from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args

            with open(doc_file, "r", encoding="utf-8") as f:
                doc_strs = f.read()

            for doc_str in re.findall(r'add_docstr\((.*?),.*?("""|\'\'\')(.*?)("""|\'\'\')\)', doc_strs, re.MULTILINE | re.DOTALL):
                for common_args in [multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args]:
                    for k, v in common_args.items():
                        self.assertNotIn(v, doc_str[2],
                                         'The argument description "{}" in {} can be '
                                         'replaced by {{{}}}'.format(v, doc_str[0], k))

        def test_doc(self):
            checked_types = (types.MethodType, types.FunctionType,
                             types.BuiltinFunctionType, types.BuiltinMethodType)

            def test_namespace(ns, *skips):
                if isinstance(ns, object):
                    ns_name = ns.__class__.__name__
                else:
                    ns_name = ns.__name__
                skip_regexes = []
                for r in skips:
                    if isinstance(r, string_classes):
                        skip_regexes.append(re.compile('^{}$'.format(re.escape(r))))
                    else:
                        skip_regexes.append(r)

                for name in dir(ns):
                    if name.startswith('_'):
                        continue
                    if name in ['real', 'imag']:
                        y = torch.randn(1, dtype=torch.cfloat)
                        var = getattr(y, name)
                    else:
                        var = getattr(ns, name)
                    if not isinstance(var, checked_types):
                        continue
                    doc = var.__doc__
                    has_doc = doc is not None and len(doc.strip()) > 0
                    full_name = ns_name + '.' + name
                    if any(r.match(name) for r in skip_regexes):
                        self.assertFalse(has_doc,
                                         'New docs have been added for {}, please remove '
                                         'it from the skipped list in TestTorch.test_doc'.format(full_name))
                    else:
                        self.assertTrue(has_doc, '{} is missing documentation'.format(full_name))

            # FIXME: All of the following should be marked as expected failures
            # so that it is easier to tell when missing docs have been added.
            # FIXME: fix all the skipped ones below!
            test_namespace(torch.randn(1),
                           'as_strided_',
                           re.compile('^clamp_(min|max)_?$'),
                           'is_distributed',
                           'is_nonzero',
                           'is_same_size',
                           'log_softmax',
                           'map2_',
                           'new',
                           'reinforce',
                           'relu',
                           'relu_',
                           'prelu',
                           'resize',
                           'resize_as',
                           'softmax',
                           'split_with_sizes',
                           'unsafe_split_with_sizes',
                           )
            test_namespace(torch.nn)
            test_namespace(torch.nn.functional, 'assert_int_or_pair')
            # TODO: add torch.* tests when we have proper namespacing on ATen functions
            # test_namespace(torch)

        def test_msnpu_error(self):
            with self.assertRaisesRegex(RuntimeError,
                                        "Could not run 'aten::empty.memory_format' with arguments from the 'MSNPU' backend"):
                torch.zeros(1, device=torch.device('msnpu'))

        def test_has_storage(self):
            self.assertIsNotNone(torch.tensor([]).storage())
            self.assertIsNotNone(torch.empty(0).storage())
            self.assertIsNotNone(torch.tensor([]).clone().storage())
            self.assertIsNotNone(torch.tensor([0, 0, 0]).nonzero().storage())
            self.assertIsNotNone(torch.tensor([]).new().storage())

        def test_where_invalid_device(self):
            if torch.cuda.is_available():
                for devices in [('cpu', 'cuda', 'cuda'), ('cuda', 'cpu', 'cpu'),
                                ('cuda', 'cpu', 'cuda'), ('cpu', 'cuda', 'cpu')]:
                    condition = torch.rand(16, device=devices[0])
                    x = torch.rand(16, device=devices[1])
                    y = torch.rand(16, device=devices[2])
                    with self.assertRaisesRegex(RuntimeError,
                                                "Expected condition, x and y to be on the same device"):
                        torch.where(condition, x, y)

        def test_where_bool_tensor(self):
            for d in torch.testing.get_all_device_types():
                a = torch.tensor([True, False], device=d)
                res = torch.where(a > 0)
                self.assertEqual(1, len(res))

        def test_where_tensor(self):
            def rand_tensor(size, dtype, device):
                if dtype.is_floating_point or dtype.is_complex:
                    return torch.rand(size=size, dtype=dtype, device=device)
                elif dtype == torch.uint8:
                    return torch.randint(1, 5, size=size, dtype=dtype, device=device)
                elif dtype == torch.bool:
                    return torch.randint(0, 1, size=size, dtype=dtype, device=device).bool()
                else:
                    return torch.randint(-5, 5, size=size, dtype=dtype, device=device)

            def get_tensor(size, dtype, device, contiguous):
                if not contiguous and len(size) < 2:
                    raise RuntimeError("Unable to generate non contiguous tensor with size < 2")
                t = rand_tensor(size, dtype, device)
                if contiguous:
                    return t
                else:
                    return t.transpose(0, 1)

            height = 5
            width = 5
            for device in torch.testing.get_all_device_types():
                for dt1 in torch.testing.get_all_dtypes():
                    for dt2 in torch.testing.get_all_dtypes():
                        for contiguous in [True, False]:
                            x1 = get_tensor((height, width), dt1, device, contiguous)
                            x2 = get_tensor((height, width), dt2, device, contiguous)
                            if dt1 != dt2:
                                self.assertRaisesRegex(RuntimeError, "expected scalar type",
                                                       lambda: torch.where(x1 == 1, x1, x2))
                            else:
                                if x1.is_floating_point():
                                    condition = (x1 < 0.5)
                                elif x1.is_complex():
                                    condition = (x1.abs() < 0.5)
                                else:
                                    condition = (x1 == 1)
                                expected = condition.to(x1.dtype) * x1 + (~condition).to(x2.dtype) * x2
                                result = torch.where(condition, x1, x2)
                                self.assertEqual(expected, result)

        def test_dtypes(self):
            all_dtypes = torch.testing.get_all_dtypes()
            do_test_dtypes(self, all_dtypes, torch.strided, torch.device('cpu'))
            if torch.cuda.is_available():
                all_dtypes.remove(torch.bfloat16)  # Remove once _th_zero_ is enabled on cuda for bfloat16
                do_test_dtypes(self, all_dtypes, torch.strided, torch.device('cuda:0'))

        def test_copy_dtypes(self):
            all_dtypes = torch.testing.get_all_dtypes()
            for dtype in all_dtypes:
                copied_dtype = copy.deepcopy(dtype)
                self.assertIs(dtype, copied_dtype)

        def test_copy_transpose(self):
            x = torch.arange(100 * 100, dtype=torch.float).reshape(100, 100).t()
            y = torch.empty(100, 100, dtype=torch.float)
            y.copy_(x)
            self.assertEqual(y[:, 0], range(100))
            self.assertEqual(y[:, 40], range(4000, 4100))

            y = torch.empty(100, 100, dtype=torch.double)
            y.copy_(x)
            self.assertEqual(y[:, 0], range(100))
            self.assertEqual(y[:, 40], range(4000, 4100))

            # Validates regression reported in https://github.com/pytorch/pytorch/issues/45269
            x = torch.arange(100 * 100).reshape(100, 100).to(dtype=torch.cfloat).t()
            y = torch.empty(100, 100, dtype=torch.cfloat)
            y.copy_(x)
            self.assertEqual(y[:, 0], range(100))
            self.assertEqual(y[:, 40], range(4000, 4100))

        def test_device(self):
            cpu = torch.device('cpu')
            self.assertEqual('cpu', str(cpu))
            self.assertEqual('cpu', cpu.type)
            self.assertEqual(None, cpu.index)

            cpu0 = torch.device('cpu:0')
            self.assertEqual('cpu:0', str(cpu0))
            self.assertEqual('cpu', cpu0.type)
            self.assertEqual(0, cpu0.index)

            cpu0 = torch.device('cpu', 0)
            self.assertEqual('cpu:0', str(cpu0))
            self.assertEqual('cpu', cpu0.type)
            self.assertEqual(0, cpu0.index)

            cuda = torch.device('cuda')
            self.assertEqual('cuda', str(cuda))
            self.assertEqual('cuda', cuda.type)
            self.assertEqual(None, cuda.index)

            cuda1 = torch.device('cuda:1')
            self.assertEqual('cuda:1', str(cuda1))
            self.assertEqual('cuda', cuda1.type)
            self.assertEqual(1, cuda1.index)

            cuda1 = torch.device('cuda', 1)
            self.assertEqual('cuda:1', str(cuda1))
            self.assertEqual('cuda', cuda1.type)
            self.assertEqual(1, cuda1.index)

            cuda90 = torch.device('cuda', 90)
            self.assertEqual('cuda:90', str(cuda90))
            self.assertEqual('cuda', cuda90.type)
            self.assertEqual(90, cuda90.index)

            self.assertRaises(RuntimeError, lambda: torch.device('cpu:-1'))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda:-1'))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 '))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda: 2'))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 2'))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda:2.'))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda:2?'))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda:?2'))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda:'))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda:2.232'))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 cuda:3'))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda:2+cuda:3'))
            self.assertRaises(RuntimeError, lambda: torch.device('cuda:2cuda:3'))
            self.assertRaises(RuntimeError, lambda: torch.device(-1))

            self.assertRaises(RuntimeError, lambda: torch.device('other'))
            self.assertRaises(RuntimeError, lambda: torch.device('other:0'))

            device_set = {'cpu', 'cpu:0', 'cuda', 'cuda:0', 'cuda:1', 'cuda:10', 'cuda:100'}
            device_hash_set = set()
            for device in list(device_set):
                device_hash_set.add(hash(torch.device(device)))
            self.assertEqual(len(device_set), len(device_hash_set))

            def get_expected_device_repr(device):
                if device.index is not None:
                    return "device(type='{type}', index={index})".format(
                        type=device.type, index=device.index)

                return "device(type='{type}')".format(type=device.type)

            for device in device_set:
                dev = torch.device(device)
                self.assertEqual(repr(dev), get_expected_device_repr(dev))

        def test_to(self):
            def test_copy_behavior(t, non_blocking=False):
                self.assertIs(t, t.to(t, non_blocking=non_blocking))
                self.assertIs(t, t.to(t.dtype, non_blocking=non_blocking))
                self.assertIs(t, t.to(torch.empty_like(t), non_blocking=non_blocking))
                self.assertIsNot(t, t.to(t, non_blocking=non_blocking, copy=True))
                self.assertIsNot(t, t.to(t.dtype, non_blocking=non_blocking, copy=True))
                self.assertIsNot(t, t.to(torch.empty_like(t), non_blocking=non_blocking, copy=True))

                devices = [t.device]
                if t.device.type == 'cuda':
                    if t.device.index == -1:
                        devices.append('cuda:{}'.format(torch.cuda.current_device()))
                    elif t.device.index == torch.cuda.current_device():
                        devices.append('cuda')
                for device in devices:
                    self.assertIs(t, t.to(device, non_blocking=non_blocking))
                    self.assertIs(t, t.to(device, t.dtype, non_blocking=non_blocking))
                    self.assertIsNot(t, t.to(device, non_blocking=non_blocking, copy=True))
                    self.assertIsNot(t, t.to(device, t.dtype, non_blocking=non_blocking, copy=True))

            a = torch.tensor(5)
            test_copy_behavior(a)
            self.assertEqual(a.device, a.to('cpu').device)
            self.assertEqual(a.device, a.to('cpu', dtype=torch.float32).device)
            self.assertIs(torch.float32, a.to('cpu', dtype=torch.float32).dtype)
            self.assertEqual(a.device, a.to(torch.float32).device)
            self.assertIs(torch.float32, a.to(dtype=torch.float32).dtype)
            self.assertEqual(a.data_ptr(), a.to('cpu').data_ptr())
            self.assertEqual(a.data_ptr(), a.to(dtype=a.dtype, device=a.device, copy=False).data_ptr())
            self.assertEqual(a.data_ptr(), a.to('cpu', copy=False).data_ptr())
            self.assertNotEqual(a.data_ptr(), a.to('cpu', copy=True).data_ptr())

            if torch.cuda.is_available():
                for non_blocking in [True, False]:
                    for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
                        b = torch.tensor(5., device=cuda)
                        test_copy_behavior(b, non_blocking)
                        self.assertEqual(b.device, b.to(cuda, non_blocking=non_blocking).device)
                        self.assertEqual(a.device, b.to('cpu', non_blocking=non_blocking).device)
                        self.assertEqual(b.device, a.to(cuda, non_blocking=non_blocking).device)
                        self.assertIs(torch.int32, b.to('cpu', dtype=torch.int32, non_blocking=non_blocking).dtype)
                        self.assertEqual(a.device, b.to('cpu', dtype=torch.int32, non_blocking=non_blocking).device)
                        self.assertIs(torch.int32, b.to(dtype=torch.int32).dtype)
                        self.assertEqual(b.device, b.to(dtype=torch.int32).device)

        def test_to_with_tensor(self):
            a = torch.tensor(5)
            self.assertEqual(a.device, a.to(a).device)

            if torch.cuda.is_available():
                for non_blocking in [True, False]:
                    for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
                        b = torch.tensor(5., device=cuda)
                        self.assertEqual(b.device, b.to(b, non_blocking=non_blocking).device)
                        self.assertEqual(a.device, b.to(a, non_blocking=non_blocking).device)
                        self.assertEqual(b.device, a.to(b, non_blocking=non_blocking).device)

        def test_as_subclass(self):
            class SubTensor(torch.Tensor):
                member_var = object()

            t0 = torch.tensor(0)
            t1 = torch.tensor([1, 2])
            t2 = torch.tensor([[3, 4], [5, 6]])

            s0 = t0.as_subclass(SubTensor)
            s1 = t1.as_subclass(SubTensor)
            s2 = t2.as_subclass(SubTensor)

            # Check that the correct type is returned.
            self.assertTrue(type(s0) is SubTensor)
            self.assertTrue(type(s1) is SubTensor)
            self.assertTrue(type(s2) is SubTensor)

            # Check that the data is equal.
            self.assertEqual(t0, s0)
            self.assertEqual(t1, s1)
            self.assertEqual(t2, s2)

            t0[()] = 1
            t1[1] = 3
            t2[1, 1] = 7

            # Check that the data is equal even after modification.
            self.assertEqual(t0, s0)
            self.assertEqual(t1, s1)
            self.assertEqual(t2, s2)

            # Check that member variables are passed through.
            self.assertTrue(s0.member_var is SubTensor.member_var)
            self.assertTrue(s1.member_var is SubTensor.member_var)
            self.assertTrue(s2.member_var is SubTensor.member_var)

            # Test that autograd is propagated.
            t = torch.tensor(5, dtype=torch.float32, requires_grad=True)

            # Run a calculation on the tensor.
            exp_t = torch.exp(t)

            # Cast exp_t to a subclass.
            exp_s = exp_t.as_subclass(SubTensor)

            # Make sure that t.grad was initially None
            self.assertTrue(t.grad is None)

            # Run the autograd calculation.
            exp_s.backward()

            # Make sure autograd was propagated to the original tensor
            # declared with requires_grad.
            self.assertTrue(t.grad is not None)

        def test_type(self):
            x = torch.randn(3, 3).double()
            self.assertEqual(x.type('torch.FloatTensor').dtype, torch.float32)
            self.assertEqual(x.type(torch.FloatTensor).dtype, torch.float32)
            self.assertEqual(x.int().type(torch.Tensor).dtype, torch.get_default_dtype())
            self.assertEqual(x.type(torch.int32).dtype, torch.int32)

        def test_qengine(self):
            qengines = torch.backends.quantized.supported_engines
            original_qe = torch.backends.quantized.engine
            for qe in qengines:
                torch.backends.quantized.engine = qe
                assert torch.backends.quantized.engine == qe, 'qengine not set successfully'
            torch.backends.quantized.engine = original_qe

        def _spawn_method(self, method, arg):
            try:
                mp.set_start_method('spawn')
            except RuntimeError:
                pass
            with mp.Pool(1) as pool:
                out: list = pool.map(method, [arg])
                self.assertTrue(out[0])

        @staticmethod
        def _test_multinomial_invalid_probs(probs):
            try:
                # n_sample = 1 is a special case, test n_sample=2 which is more general
                torch.multinomial(probs.to('cpu'), 2)
                return False  # Should not be reached
            except RuntimeError as e:
                return 'probability tensor contains either `inf`, `nan` or element < 0' in str(e)

        @slowTest
        @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
                         don't support multiprocessing with spawn start method")
        @unittest.skipIf(IS_WINDOWS, 'FIXME: CUDA OOM error on Windows')
        def test_multinomial_invalid_probs(self):
            test_method = AbstractTestCases._TestTorchMixin._test_multinomial_invalid_probs
            self._spawn_method(test_method, torch.tensor([1., -1., 1.]))
            self._spawn_method(test_method, torch.tensor([1., inf, 1.]))
            self._spawn_method(test_method, torch.tensor([1., -inf, 1.]))
            self._spawn_method(test_method, torch.tensor([1., 1., nan]))

        def test_copy_broadcast(self):
            torch.zeros(5, 6).copy_(torch.zeros(6))
            self.assertRaises(RuntimeError, lambda: torch.zeros(5, 6).copy_(torch.zeros(30)))

        def test_copy_many_to_one(self):
            # An in-place copy into an expanded (many-to-one) view attempts to
            # write several source elements to a single storage location, which
            # should raise a RuntimeError.
            self.assertRaises(RuntimeError, lambda: torch.zeros(1, 6).expand(5, 6).copy_(torch.zeros(5, 6)))

        def test_slice(self):
            empty = torch.empty(0, 4)
            x = torch.arange(0., 16).view(4, 4)
            self.assertEqual(x[:], x)
            self.assertEqual(x[:4], x)
            # start and stop are clamped to the size of dim
            self.assertEqual(x[:5], x)
            # if start >= stop then the result is empty
            self.assertEqual(x[2:1], empty)
            self.assertEqual(x[2:2], empty)
            # out of bounds is also empty
            self.assertEqual(x[10:12], empty)
            # additional correctness checks
            self.assertEqual(x[:1].tolist(), [[0, 1, 2, 3]])
            self.assertEqual(x[:-3].tolist(), [[0, 1, 2, 3]])
            self.assertEqual(x[:, -2:3].tolist(), [[2], [6], [10], [14]])
            self.assertEqual(x[0:-1:2].tolist(), [[0, 1, 2, 3], [8, 9, 10, 11]])

        @unittest.skip("Not implemented yet")
        def test_conv2(self):
            x = torch.rand(math.floor(torch.uniform(50, 100)), math.floor(torch.uniform(50, 100)))
            k = torch.rand(math.floor(torch.uniform(10, 20)), math.floor(torch.uniform(10, 20)))
            imvc = torch.conv2(x, k)
            imvc2 = torch.conv2(x, k, 'V')
            imfc = torch.conv2(x, k, 'F')

            ki = k.clone()
            ks = k.storage()
            kis = ki.storage()
            for i in range(ks.size() - 1, 0, -1):
                kis[ks.size() - i + 1] = ks[i]
            # for i=ks.size(), 1, -1 do kis[ks.size()-i+1]=ks[i] end
            imvx = torch.xcorr2(x, ki)
            imvx2 = torch.xcorr2(x, ki, 'V')
            imfx = torch.xcorr2(x, ki, 'F')

            self.assertEqual(imvc, imvc2, atol=0, rtol=0, msg='torch.conv2')
            self.assertEqual(imvc, imvx, atol=0, rtol=0, msg='torch.conv2')
            self.assertEqual(imvc, imvx2, atol=0, rtol=0, msg='torch.conv2')
            self.assertEqual(imfc, imfx, atol=0, rtol=0, msg='torch.conv2')
            self.assertLessEqual(math.abs(x.dot(x) - torch.xcorr2(x, x)[0][0]), 1e-10, 'torch.conv2')

            xx = torch.empty(2, x.size(1), x.size(2))
            xx[1].copy_(x)
            xx[2].copy_(x)
            kk = torch.empty(2, k.size(1), k.size(2))
            kk[1].copy_(k)
            kk[2].copy_(k)

            immvc = torch.conv2(xx, kk)
            immvc2 = torch.conv2(xx, kk, 'V')
            immfc = torch.conv2(xx, kk, 'F')

            self.assertEqual(immvc[0], immvc[1], atol=0, rtol=0, msg='torch.conv2')
            self.assertEqual(immvc[0], imvc, atol=0, rtol=0, msg='torch.conv2')
            self.assertEqual(immvc2[0], imvc2, atol=0, rtol=0, msg='torch.conv2')
            self.assertEqual(immfc[0], immfc[1], atol=0, rtol=0, msg='torch.conv2')
            self.assertEqual(immfc[0], imfc, atol=0, rtol=0, msg='torch.conv2')

        @unittest.skip("Not implemented yet")
        def test_conv3(self):
            x = torch.rand(math.floor(torch.uniform(20, 40)),
                           math.floor(torch.uniform(20, 40)),
                           math.floor(torch.uniform(20, 40)))
            k = torch.rand(math.floor(torch.uniform(5, 10)),
                           math.floor(torch.uniform(5, 10)),
                           math.floor(torch.uniform(5, 10)))
            imvc = torch.conv3(x, k)
            imvc2 = torch.conv3(x, k, 'V')
            imfc = torch.conv3(x, k, 'F')

            ki = k.clone()
            ks = k.storage()
            kis = ki.storage()
            for i in range(ks.size() - 1, 0, -1):
                kis[ks.size() - i + 1] = ks[i]
            imvx = torch.xcorr3(x, ki)
            imvx2 = torch.xcorr3(x, ki, 'V')
            imfx = torch.xcorr3(x, ki, 'F')

            self.assertEqual(imvc, imvc2, atol=0, rtol=0, msg='torch.conv3')
            self.assertEqual(imvc, imvx, atol=0, rtol=0, msg='torch.conv3')
            self.assertEqual(imvc, imvx2, atol=0, rtol=0, msg='torch.conv3')
            self.assertEqual(imfc, imfx, atol=0, rtol=0, msg='torch.conv3')
            self.assertLessEqual(math.abs(x.dot(x) - torch.xcorr3(x, x)[0][0][0]), 4e-10, 'torch.conv3')

            xx = torch.empty(2, x.size(1), x.size(2), x.size(3))
            xx[1].copy_(x)
            xx[2].copy_(x)
            kk = torch.empty(2, k.size(1), k.size(2), k.size(3))
            kk[1].copy_(k)
            kk[2].copy_(k)

            immvc = torch.conv3(xx, kk)
            immvc2 = torch.conv3(xx, kk, 'V')
            immfc = torch.conv3(xx, kk, 'F')

            self.assertEqual(immvc[0], immvc[1], atol=0, rtol=0, msg='torch.conv3')
            self.assertEqual(immvc[0], imvc, atol=0, rtol=0, msg='torch.conv3')
            self.assertEqual(immvc2[0], imvc2, atol=0, rtol=0, msg='torch.conv3')
            self.assertEqual(immfc[0], immfc[1], atol=0, rtol=0, msg='torch.conv3')
            self.assertEqual(immfc[0], imfc, atol=0, rtol=0, msg='torch.conv3')

        @unittest.skip("Not implemented yet")
        def _test_conv_corr_eq(self, fn, fn_2_to_3):
            ix = math.floor(random.randint(20, 40))
            iy = math.floor(random.randint(20, 40))
            iz = math.floor(random.randint(20, 40))
            kx = math.floor(random.randint(5, 10))
            ky = math.floor(random.randint(5, 10))
            kz = math.floor(random.randint(5, 10))

            x = torch.rand(ix, iy, iz)
            k = torch.rand(kx, ky, kz)

            o3 = fn(x, k)
            o32 = torch.zeros(o3.size())
            fn_2_to_3(x, k, o3, o32)
            self.assertEqual(o3, o32)

        @unittest.skip("Not implemented yet")
        def test_xcorr3_xcorr2_eq(self):
            def reference(x, k, o3, o32):
                for i in range(o3.size(1)):
                    for j in range(k.size(1)):
                        o32[i].add(torch.xcorr2(x[i + j - 1], k[j]))
            self._test_conv_corr_eq(torch.xcorr3, reference)

        @unittest.skip("Not implemented yet")
        def test_xcorr3_xcorr2_eq_full(self):
            def reference(x, k, o3, o32):
                for i in range(x.size(1)):
                    for j in range(k.size(1)):
                        o32[i].add(torch.xcorr2(x[i], k[k.size(1) - j + 1], 'F'))
            self._test_conv_corr_eq(lambda x, k: torch.xcorr3(x, k, 'F'), reference)

        @unittest.skip("Not implemented yet")
        def test_conv3_conv2_eq_valid(self):
            def reference(x, k, o3, o32):
                for i in range(o3.size(1)):
                    for j in range(k.size(1)):
                        o32[i].add(torch.conv2(x[i + j - 1], k[k.size(1) - j + 1]))
            self._test_conv_corr_eq(torch.conv3, reference)

        @unittest.skip("Not implemented yet")
        def test_fconv3_fconv2_eq(self):
            def reference(x, k, o3, o32):
                for i in range(o3.size(1)):
                    for j in range(k.size(1)):
                        o32[i + j - 1].add(torch.conv2(x[i], k[j], 'F'))
            self._test_conv_corr_eq(lambda x, k: torch.conv3(x, k, 'F'), reference)

        def test_dtype_is_signed(self):
            for dtype in torch.testing.get_all_dtypes():
                self.assertEqual(dtype.is_signed, torch.is_signed(torch.tensor(0, dtype=dtype)))

            self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.quint8.is_signed)
            self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.qint8.is_signed)
            self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.qint32.is_signed)

        def test_RNGState(self):
            state = torch.get_rng_state()
            stateCloned = state.clone()
            before = torch.rand(1000)

            self.assertEqual(state.ne(stateCloned).long().sum(), 0, atol=0, rtol=0)

            torch.set_rng_state(state)
            after = torch.rand(1000)
            self.assertEqual(before, after, atol=0, rtol=0)

        def test_RNGStateAliasing(self):
            # Fork the random number stream at this point
            gen = torch.Generator()
            gen.set_state(torch.get_rng_state())
            self.assertEqual(gen.get_state(), torch.get_rng_state())

            target_value = torch.rand(1000)
            # Dramatically alter the internal state of the main generator
            _ = torch.rand(100000)
            forked_value = torch.rand(1000, generator=gen)
            self.assertEqual(target_value, forked_value, atol=0, rtol=0, msg="RNG has not forked correctly.")

        def test_RNG_after_pickle(self):
            torch.random.manual_seed(100)
            before = torch.rand(10)

            torch.random.manual_seed(100)
            buf = io.BytesIO()
            tensor = torch.tensor([1, 2, 3])
            ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(tensor)
            after = torch.rand(10)

            self.assertEqual(before, after, atol=0, rtol=0)

        def test_boxMullerState(self):
            torch.manual_seed(123)
            odd_number = 101
            seeded = torch.randn(odd_number)
            state = torch.get_rng_state()
            midstream = torch.randn(odd_number)
            torch.set_rng_state(state)
            repeat_midstream = torch.randn(odd_number)
            torch.manual_seed(123)
            reseeded = torch.randn(odd_number)
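            # (Added note, an assumption about this test's intent: normal
            # samples come from the Box-Muller transform in pairs, so drawing
            # an odd count leaves a cached second sample; the checks below
            # verify that this cached value round-trips through
            # get_rng_state()/set_rng_state().)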
            self.assertEqual(midstream, repeat_midstream, atol=0, rtol=0,
                             msg='get_rng_state/set_rng_state not generating same sequence of normally distributed numbers')
            self.assertEqual(seeded, reseeded, atol=0, rtol=0,
                             msg='repeated calls to manual_seed not generating same sequence of normally distributed numbers')

        def test_manual_seed(self):
            rng_state = torch.get_rng_state()
            torch.manual_seed(2)
            x = torch.randn(100)
            self.assertEqual(torch.initial_seed(), 2)
            torch.manual_seed(2)
            y = torch.randn(100)
            self.assertEqual(x, y)

            max_int64 = 0x7fff_ffff_ffff_ffff
            min_int64 = -max_int64 - 1
            max_uint64 = 0xffff_ffff_ffff_ffff
            # Check all boundary cases of valid seed value inputs
            test_cases = [
                # (seed, expected_initial_seed)
                # Positive seeds should be unchanged
                (max_int64, max_int64),
                (max_int64 + 1, max_int64 + 1),
                (max_uint64, max_uint64),
                (0, 0),
                # Negative seeds wrap around starting from the largest seed value
                (-1, max_uint64),
                (min_int64, max_int64 + 1)
            ]
            for seed, expected_initial_seed in test_cases:
                torch.manual_seed(seed)
                actual_initial_seed = torch.initial_seed()
                msg = "expected initial_seed() = %x after calling manual_seed(%x), but got %x instead" % (
                    expected_initial_seed, seed, actual_initial_seed)
                self.assertEqual(expected_initial_seed, actual_initial_seed, msg=msg)
            for invalid_seed in [min_int64 - 1, max_uint64 + 1]:
                with self.assertRaisesRegex(RuntimeError, r'Overflow when unpacking long'):
                    torch.manual_seed(invalid_seed)

            torch.set_rng_state(rng_state)

        def test_numel(self):
            b = torch.ByteTensor(3, 100, 100)
            self.assertEqual(b.nelement(), 3 * 100 * 100)
            self.assertEqual(b.numel(), 3 * 100 * 100)

        def test_empty_storage_view(self):
            # we should be able to "modify" slices of a 0-element
            # array without an error being raised due to
            # trying to resize its storage
            t = torch.from_numpy(np.empty((0, 4)))
            t[:, 1::2] *= 1

        def test_newaxis_numpy_comparison(self):
            def run_test(tensor, *idx):
                npt = tensor.numpy()
                self.assertEqual(tensor[idx], npt[idx])

            # 1D Tensor Tests
            x = torch.arange(0, 10)
            cases = [
                [None],
                [None, None],
                [Ellipsis, None],
                [None, Ellipsis],
                [2, None],
                [None, 2],
                [Ellipsis, None, 2],
                [Ellipsis, 2, None],
                [2, Ellipsis, None],
                [2, None, Ellipsis],
                [None, 2, Ellipsis],
                [None, Ellipsis, 2],
            ]

            for case in cases:
                run_test(x, *case)

            # 2D Tensor Tests
            x = torch.arange(0, 12).view(3, 4)
            cases = [
                [None],
                [None, None],
                [None, None, None],
                [Ellipsis, None],
                [Ellipsis, None, None],
                [None, Ellipsis],
                [None, Ellipsis, None],
                [None, None, Ellipsis],
                [2, None],
                [2, None, Ellipsis],
                [2, Ellipsis, None],
                [None, 2, Ellipsis],
                [Ellipsis, 2, None],
                [Ellipsis, None, 2],
                [None, Ellipsis, 2],
                [1, 2, None],
                [1, 2, Ellipsis, None],
                [1, Ellipsis, 2, None],
                [Ellipsis, 1, None, 2],
                [Ellipsis, 1, 2, None],
                [1, None, 2, Ellipsis],
                [None, 1, Ellipsis, 2],
                [None, 1, 2, Ellipsis],
            ]

            for case in cases:
                run_test(x, *case)

        def _consecutive(self, size, start=1):
            sequence = torch.ones(torch.tensor(size).prod(0)).cumsum(0)
            sequence.add_(start - 1)
            return sequence.resize_(*size)

        def test_newindex(self):
            reference = self._consecutive((3, 3, 3))
            # This relies on __index__() being correct - but we have separate tests for that

            def checkPartialAssign(index):
                reference = torch.zeros(3, 3, 3)
                reference[index] = self._consecutive((3, 3, 3))[index]
                self.assertEqual(reference[index], self._consecutive((3, 3, 3))[index], atol=0, rtol=0)
                reference[index] = 0
                self.assertEqual(reference, torch.zeros(3, 3, 3), atol=0, rtol=0)

            checkPartialAssign(0)
            checkPartialAssign(1)
            checkPartialAssign(2)
            checkPartialAssign((0, 1))
            checkPartialAssign((1, 2))
            checkPartialAssign((0, 2))
            checkPartialAssign(torch.LongTensor((0, 2)))

            with self.assertRaises(IndexError):
                reference[1, 1, 1, 1] = 1
            with self.assertRaises(IndexError):
                reference[1, 1, 1, (1, 1)] = 1
            with self.assertRaises(IndexError):
                reference[3, 3, 3, 3, 3, 3, 3, 3] = 1
            with self.assertRaises(IndexError):
                reference[0.0] = 1
            with self.assertRaises(TypeError):
                reference[0.0:2.0] = 1
            with self.assertRaises(IndexError):
                reference[0.0, 0.0:2.0] = 1
            with self.assertRaises(IndexError):
                reference[0.0, :, 0.0:2.0] = 1
            with self.assertRaises(IndexError):
                reference[0.0, ..., 0.0:2.0] = 1
            with self.assertRaises(IndexError):
                reference[0.0, :, 0.0] = 1

        def test_index_add(self):
            for device in torch.testing.get_all_device_types():
                for dest_contig, src_contig, index_contig in product([True, False], repeat=3):
                    for other_sizes in ((), (4, 5)):
                        for dtype in [torch.int, torch.long]:
                            num_copy, num_dest = 3, 3
                            dest = torch.randn(num_dest, *other_sizes, device=device)
                            if not dest_contig:
                                dest = torch.testing.make_non_contiguous(dest)
                            src = torch.randn(num_copy, *other_sizes, device=device)
                            if not src_contig:
                                src = torch.testing.make_non_contiguous(src)
                            idx = torch.randperm(num_dest, dtype=dtype, device=device).narrow(0, 0, num_copy)
                            if not index_contig:
                                idx = torch.testing.make_non_contiguous(idx)
                            # index_add_ without alpha argument
                            dest2 = dest.clone()
                            dest.index_add_(0, idx, src)
                            for i in range(idx.size(0)):
                                dest2[idx[i]] += src[i]
                            self.assertEqual(dest, dest2)
                            # index_add_ with alpha argument
                            dest2 = dest.clone()
                            dest.index_add_(0, idx, src, alpha=2)
                            for i in range(idx.size(0)):
                                dest2[idx[i]] += src[i] * 2
                            self.assertEqual(dest, dest2)

        # add coverage for issue with atomic add that appeared only for
        # specific dtypes on cuda:
        # https://github.com/pytorch/pytorch/issues/29153
        def test_index_add_all_dtypes(self):
            for device in torch.testing.get_all_device_types():
                for dtype in torch.testing.get_all_math_dtypes(device):
                    for idx_dtype in [torch.int, torch.long]:
                        size = [5, 5]
                        if dtype.is_floating_point or dtype.is_complex:
                            tensor = torch.rand(size, dtype=dtype, device=device)
                        elif dtype.is_signed:
                            tensor = torch.randint(-5, 15, size, dtype=dtype, device=device)
                        else:
                            tensor = torch.randint(0, 10, size, dtype=dtype, device=device)

                        # index_add calls atomicAdd on cuda.
                        zeros = torch.zeros(size, dtype=dtype, device=device)

                        added = zeros.index_add(0, torch.arange(0, size[0], dtype=idx_dtype, device=device), tensor)
                        self.assertEqual(added, tensor)

                        added = zeros.index_add(0, torch.arange(0, size[0], dtype=idx_dtype, device=device), tensor, alpha=-1)
                        self.assertEqual(added, -tensor)

        # Fill idx with valid indices.
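        # (Added descriptive note: each 1-D slice of `idx` along `dim` is
        # filled with `elems_per_row` distinct values from randperm(dim_size),
        # so the gather/scatter tests below never see duplicate or
        # out-of-range indices within a row.)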
        @staticmethod
        def _fill_indices(self, idx, dim, dim_size, elems_per_row, m, n, o):
            for i in range(1 if dim == 0 else m):
                for j in range(1 if dim == 1 else n):
                    for k in range(1 if dim == 2 else o):
                        ii = [i, j, k]
                        ii[dim] = slice(0, idx.size(dim) + 1)
                        idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row]

        def test_unflatten(self):
            # test args: tensor, int, sizes
            self.assertEqual(torch.tensor([]).unflatten(0, (0, 1)), torch.empty(0, 1))
            self.assertEqual(torch.tensor([1]).unflatten(0, (1, 1)), torch.tensor([[1]]))
            self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, (2, 2)), torch.tensor([[1, 2], [3, 4]]))
            self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, [2, 2]), torch.tensor([[1, 2], [3, 4]]))
            self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, torch.Size([2, 2])), torch.tensor([[1, 2], [3, 4]]))
            self.assertEqual(torch.ones(2, 10).unflatten(1, (5, 2)), torch.ones(2, 5, 2))
            self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, (-1, 2)),
                             torch.tensor([[1, 2], [3, 4]]))
            self.assertEqual(torch.ones(2, 10).unflatten(1, (5, -1)),
                             torch.ones(2, 5, 2))
            self.assertEqual(torch.ones(2, 10).unflatten(1, (-1,)),
                             torch.ones(2, 10))
            self.assertEqual(torch.ones(2, 3 * 4 * 5 * 6).unflatten(1, (3, 4, -1, 6)),
                             torch.ones(2, 3, 4, 5, 6))
            self.assertEqual(torch.ones(2, 0, 2).unflatten(1, (3, -1, 4, 5)),
                             torch.ones(2, 3, 0, 4, 5, 2))

            # test invalid args: tensor, str, sizes
            with self.assertRaisesRegex(TypeError, r"received an invalid combination of arguments"):
                torch.tensor([1]).unflatten('A', (1, 1))

            # test invalid args: tensor, str, namedshape
            with self.assertRaisesRegex(RuntimeError, r"Name 'A' not found in Tensor\[None\]."):
                torch.ones(4).unflatten('A', (('A', 2), ('B', 2)))

            # test other invalid arguments
            with self.assertRaisesRegex(RuntimeError, r"sizes must be non-empty"):
                torch.tensor([1]).unflatten(0, [])
            with self.assertRaisesRegex(RuntimeError, r"Provided sizes \[2, 2\] don't multiply up to the size of dim 0 \(1\)"):
                torch.tensor([1]).unflatten(0, [2, 2])
            with self.assertRaisesRegex(IndexError, r"dimension specified as 0 but tensor has no dimensions"):
                torch.tensor(1).unflatten(0, [0])
            with self.assertRaisesRegex(RuntimeError, r"only one dimension can be inferred"):
                torch.randn(5, 10).unflatten(1, (-1, -1))
            with self.assertRaisesRegex(RuntimeError,
                                        r"Provided sizes \[-1, 4\] don't multiply up to the size of dim 1 \(10\)"):
                torch.randn(5, 10).unflatten(1, (-1, 4))
            with self.assertRaisesRegex(RuntimeError,
                                        r"the unspecified dimension size -1 can be any value and is ambiguous"):
                torch.randn(2, 0).unflatten(1, (2, -1, 0))

        @staticmethod
        def _test_gather(self, cast, test_bounds=True):
            m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
            elems_per_row = random.randint(1, 10)
            dim = random.randrange(3)

            for dtype in {torch.float32, torch.complex64, torch.complex128}:
                src = torch.randn(m, n, o, dtype=dtype)
                idx_size = [m, n, o]
                idx_size[dim] = elems_per_row
                idx = torch.LongTensor().resize_(*idx_size)
                AbstractTestCases._TestTorchMixin._fill_indices(self, idx, dim, src.size(dim), elems_per_row, m, n, o)

                src = cast(src)
                idx = cast(idx)

                actual = torch.gather(src, dim, idx)
                expected = cast(torch.zeros(idx_size, dtype=dtype))
                for i in range(idx_size[0]):
                    for j in range(idx_size[1]):
                        for k in range(idx_size[2]):
                            ii = [i, j, k]
                            ii[dim] = idx[i, j, k]
                            expected[i, j, k] = src[tuple(ii)]
                self.assertEqual(actual, expected, atol=0, rtol=0)

                bad_src = torch.randn(*[i - 1 for i in idx_size])
                self.assertRaises(RuntimeError, lambda: torch.gather(bad_src, dim, idx))

                # should throw an error when index dtype is not long
                with self.assertRaisesRegex(RuntimeError, 'Expected dtype int64 for index'):
                    torch.gather(src, dim, idx.to(torch.int))

                # should throw an error when out.dtype != src.dtype.
                with self.assertRaisesRegex(RuntimeError, 'Expected self.dtype to be equal to src.dtype'):
                    torch.gather(src, dim, idx, out=expected.to(torch.int))

                # checks for the same dimensionality
                with self.assertRaisesRegex(RuntimeError, 'Index tensor must have the same number of dimensions as input tensor'):
                    torch.gather(src, dim, idx.unsqueeze(-1))
                with self.assertRaisesRegex(RuntimeError, 'Index tensor must have the same number of dimensions as input tensor'):
                    torch.gather(src.unsqueeze(-1), dim, idx)

                if test_bounds:
                    idx[0][0][0] = 23
                    self.assertRaises(RuntimeError, lambda: torch.gather(src, dim, idx))

                src = cast(torch.randn(3, 4, 5))
                expected, idx = src.max(2, True)
                expected = cast(expected)
                idx = cast(idx)
                actual = torch.gather(src, 2, idx)
                self.assertEqual(actual, expected, atol=0, rtol=0)

            # Bool test case
            t = torch.tensor([[False, True], [True, True]])
            self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]])),
                             torch.tensor([[False, False], [True, True]]))

        def test_gather(self):
            self._test_gather(self, lambda t: t)

        @staticmethod
        def _test_scatter_add_mult_index_base(self, cast):
            m, n = 30, 40
            idx = torch.zeros(m, n).long()
            src = torch.ones(m, n)
            res0 = torch.zeros(m, n).scatter_add_(0, idx, src)
            res1 = torch.zeros(m, n).scatter_add_(1, idx, src)

            self.assertEqual(res0[0, :], m * torch.ones(n), atol=0, rtol=0)
            self.assertEqual(res1[:, 0], n * torch.ones(m), atol=0, rtol=0)

        def test_scatter_add_mult_index(self):
            self._test_scatter_add_mult_index_base(self, lambda t: t)

        @staticmethod
        def _test_scatter_base(self, cast, method, is_scalar=False, test_bounds=True, reduction=None, *, test_complex=False):
            if test_complex:
                dtypes = [torch.complex64, torch.complex128]
            else:
                dtypes = [torch.float16, torch.float32, torch.float64]

            for dtype in dtypes:
                m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
                elems_per_row = random.randint(1, 10)
                dim = random.randrange(3)

                idx_size = [m, n, o]
                idx_size[dim] = elems_per_row
                idx = cast(torch.LongTensor().resize_(*idx_size))
                AbstractTestCases._TestTorchMixin._fill_indices(self, idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o)

                src_size = [random.randint(1, 5) + s for s in idx_size]
                if is_scalar:
                    src = random.random()
                else:
                    src = cast(torch.randn(src_size, dtype=dtype))

                base = cast(torch.randn(m, n, o, dtype=dtype))
                if reduction:
                    actual = getattr(base.clone(), method)(dim, idx, src, reduce=reduction)
                else:
                    actual = getattr(base.clone(), method)(dim, idx, src)

                expected = base.clone()
                for i in range(idx_size[0]):
                    for j in range(idx_size[1]):
                        for k in range(idx_size[2]):
                            ii = [i, j, k]
                            ii[dim] = idx[i, j, k]
                            if method == 'scatter_add_':
                                expected[tuple(ii)] += src[i, j, k]
                            else:
                                # method may be 'scatter_' or 'scatter'
                                # both might have a reduction argument
                                value = src if is_scalar else src[i, j, k]

                                if reduction == "add":
                                    expected[tuple(ii)] += value
                                elif reduction == "multiply":
                                    expected[tuple(ii)] *= value
                                else:
                                    expected[tuple(ii)] = value

                self.assertEqual(actual, expected, atol=0, rtol=0)

                # should throw an error when self.dtype != src.dtype.
                # we ignore the case when src is Scalar, as it gets
                # cast via src.to<scalar_t>.
                if not is_scalar:
                    with self.assertRaisesRegex(RuntimeError, 'Expected self.dtype to be equal to src.dtype'):
                        getattr(base.clone().type(torch.int), method)(dim, idx, src)

                    with self.assertRaisesRegex(RuntimeError, 'Expected self.dtype to be equal to src.dtype'):
                        getattr(base.clone(), method)(dim, idx, src.type(torch.int))

                # should throw an error when index dtype is not long
                with self.assertRaisesRegex(RuntimeError, 'Expected dtype int64 for index'):
                    getattr(base.clone(), method)(dim, idx.type(torch.int), src)

                # check for the same dimensionality
                with self.assertRaisesRegex(RuntimeError, 'Index tensor must have the same number of dimensions as self tensor'):
                    getattr(base.clone().unsqueeze(-1), method)(dim, idx, src)

                with self.assertRaisesRegex(RuntimeError, 'Index tensor must have the same number of dimensions as self tensor'):
                    getattr(base.clone(), method)(dim, idx.unsqueeze(-1), src)

                if not is_scalar:
                    with self.assertRaisesRegex(RuntimeError, 'Index tensor must have the same number of dimensions as src tensor'):
                        getattr(base.clone(), method)(dim, idx, src.unsqueeze(-1))

                if test_bounds:
                    idx[0][0][0] = 34
                    with self.assertRaises(RuntimeError):
                        if reduction:
                            getattr(base.clone(), method)(dim, idx, src, reduce=reduction)
                        else:
                            getattr(base.clone(), method)(dim, idx, src)

                # test for empty index, should be a no-op
                idx = cast(torch.LongTensor())
                if reduction:
                    actual = getattr(base.clone(), method)(dim, idx, src, reduce=reduction)
                else:
                    actual = getattr(base.clone(), method)(dim, idx, src)
                self.assertEqual(actual, base, atol=0, rtol=0)

        def test_scatter(self):
            self._test_scatter_base(self, lambda t: t, 'scatter_')

        def test_scatterAdd(self):
            self._test_scatter_base(self, lambda t: t, 'scatter_add_')

        def test_scatterFill(self):
            self._test_scatter_base(self, lambda t: t, 'scatter_', True)

        def test_scatterReduce(self):
            for method in ["add", "multiply"]:
                self._test_scatter_base(self, lambda t: t, 'scatter_', reduction=method)
                self._test_scatter_base(self, lambda t: t, 'scatter_', True, reduction=method)

        def test_structseq_repr(self):
            a = torch.arange(250).reshape(5, 5, 10)
            expected = """
            torch.return_types.max(
            values=tensor([[ 40,  41,  42,  43,  44,  45,  46,  47,  48,  49],
                    [ 90,  91,  92,  93,  94,  95,  96,  97,  98,  99],
                    [140, 141, 142, 143, 144, 145, 146, 147, 148, 149],
                    [190, 191, 192, 193, 194, 195, 196, 197, 198, 199],
                    [240, 241, 242, 243, 244, 245, 246, 247, 248, 249]]),
            indices=tensor([[4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
                    [4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
                    [4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
                    [4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
                    [4, 4, 4, 4, 4, 4, 4, 4, 4, 4]]))"""
            self.assertEqual(repr(a.max(1)), textwrap.dedent(expected).strip())

        def test_is_same_size(self):
            t1 = torch.empty(3, 4, 9, 10)
            t2 = torch.empty(3, 4)
            t3 = torch.empty(1, 9, 3, 3)
            t4 = torch.empty(3, 4, 9, 10)

            self.assertFalse(t1.is_same_size(t2))
            self.assertFalse(t1.is_same_size(t3))
            self.assertTrue(t1.is_same_size(t4))

        def test_tensor_set(self):
            t1 = torch.tensor([])
            t2 = torch.empty(3, 4, 9, 10).uniform_()
            t1.set_(t2)
            self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
            size = torch.Size([9, 3, 4, 10])
            t1.set_(t2.storage(), 0, size)
            self.assertEqual(t1.size(), size)
            t1.set_(t2.storage(), 0, tuple(size))
            self.assertEqual(t1.size(), size)
            self.assertEqual(t1.stride(), (120, 40, 10, 1))
            stride = (10, 360, 90, 1)
            t1.set_(t2.storage(), 0, size, stride)
            self.assertEqual(t1.stride(), stride)
            t1.set_(t2.storage(), 0, size=size, stride=stride)
            self.assertEqual(t1.size(), size)
            self.assertEqual(t1.stride(), stride)

            # test argument names
            t1 = torch.tensor([])
            # 1. case when source is tensor
            t1.set_(source=t2)
            self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
            # 2. case when source is storage
            t1.set_(source=t2.storage())
            self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
            # 3. case when source is storage, and other args also specified
            t1.set_(source=t2.storage(), storage_offset=0, size=size, stride=stride)
            self.assertEqual(t1.size(), size)
            self.assertEqual(t1.stride(), stride)

            t1 = torch.tensor([True, True], dtype=torch.bool)
            t2 = torch.tensor([False, False], dtype=torch.bool)
            t1.set_(t2)
            self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)

        def test_tensor_set_errors(self):
            f_cpu = torch.randn((2, 3), dtype=torch.float32)
            d_cpu = torch.randn((2, 3), dtype=torch.float64)

            # change dtype
            self.assertRaises(RuntimeError, lambda: f_cpu.set_(d_cpu.storage()))
            self.assertRaises(RuntimeError,
                              lambda: f_cpu.set_(d_cpu.storage(), 0, d_cpu.size(), d_cpu.stride()))
            self.assertRaises(RuntimeError, lambda: f_cpu.set_(d_cpu))

            # change device
            if torch.cuda.is_available():
                f_cuda = torch.randn((2, 3), dtype=torch.float32, device='cuda')

                # cpu -> cuda
                self.assertRaises(RuntimeError, lambda: f_cpu.set_(f_cuda.storage()))
                self.assertRaises(RuntimeError,
                                  lambda: f_cpu.set_(f_cuda.storage(), 0, f_cuda.size(), f_cuda.stride()))
                self.assertRaises(RuntimeError, lambda: f_cpu.set_(f_cuda))

                # cuda -> cpu
                self.assertRaises(RuntimeError, lambda: f_cuda.set_(f_cpu.storage()))
                self.assertRaises(RuntimeError,
                                  lambda: f_cuda.set_(f_cpu.storage(), 0, f_cpu.size(), f_cpu.stride()))
                self.assertRaises(RuntimeError, lambda: f_cuda.set_(f_cpu))

        def test_equal(self):
            # Contiguous, 1D
            t1 = torch.tensor((3., 4., 9., 10.))
            t2 = t1.contiguous()
            t3 = torch.tensor((1., 9., 3., 10.))
            t4 = torch.tensor((3., 4., 9.))
            t5 = torch.tensor([])
            self.assertTrue(t1.equal(t2))
            self.assertFalse(t1.equal(t3))
            self.assertFalse(t1.equal(t4))
            self.assertFalse(t1.equal(t5))
            self.assertTrue(torch.equal(t1, t2))
            self.assertFalse(torch.equal(t1, t3))
            self.assertFalse(torch.equal(t1, t4))
            self.assertFalse(torch.equal(t1, t5))

            # Non contiguous, 2D
            s = torch.tensor(((1, 2, 3, 4), (5, 6, 7, 8)))
            s1 = s[:, 1:3]
            s2 = s1.clone()
            s3 = torch.tensor(((2, 3), (6, 7)))
            s4 = torch.tensor(((0, 0), (0, 0)))
            self.assertFalse(s1.is_contiguous())
            self.assertTrue(s1.equal(s2))
            self.assertTrue(s1.equal(s3))
            self.assertFalse(s1.equal(s4))
            self.assertTrue(torch.equal(s1, s2))
            self.assertTrue(torch.equal(s1, s3))
            self.assertFalse(torch.equal(s1, s4))

        def test_element_size(self):
            byte = torch.ByteStorage().element_size()
            char = torch.CharStorage().element_size()
            short = torch.ShortStorage().element_size()
            int = torch.IntStorage().element_size()
            long = torch.LongStorage().element_size()
            float = torch.FloatStorage().element_size()
            double = torch.DoubleStorage().element_size()
            bool = torch.BoolStorage().element_size()
            bfloat16 = torch.BFloat16Storage().element_size()
            complexfloat = torch.ComplexFloatStorage().element_size()
            complexdouble = torch.ComplexDoubleStorage().element_size()

            self.assertEqual(byte, torch.ByteTensor().element_size())
            self.assertEqual(char, torch.CharTensor().element_size())
            self.assertEqual(short, torch.ShortTensor().element_size())
            self.assertEqual(int, torch.IntTensor().element_size())
            self.assertEqual(long, torch.LongTensor().element_size())
            self.assertEqual(float, torch.FloatTensor().element_size())
            self.assertEqual(double, torch.DoubleTensor().element_size())
            self.assertEqual(bool, torch.BoolTensor().element_size())
            self.assertEqual(bfloat16, torch.tensor([], dtype=torch.bfloat16).element_size())
            self.assertEqual(complexfloat, torch.tensor([], dtype=torch.complex64).element_size())
            self.assertEqual(complexdouble, torch.tensor([], dtype=torch.complex128).element_size())

            self.assertGreater(byte, 0)
            self.assertGreater(char, 0)
            self.assertGreater(short, 0)
            self.assertGreater(int, 0)
            self.assertGreater(long, 0)
            self.assertGreater(float, 0)
            self.assertGreater(double, 0)
            self.assertGreater(bool, 0)
            self.assertGreater(bfloat16, 0)
            self.assertGreater(complexfloat, 0)
            self.assertGreater(complexdouble, 0)

            # These tests are portable, not necessarily strict for your system.
            self.assertEqual(byte, 1)
            self.assertEqual(char, 1)
            self.assertEqual(bool, 1)
            self.assertGreaterEqual(short, 2)
            self.assertGreaterEqual(int, 2)
            self.assertGreaterEqual(int, short)
            self.assertGreaterEqual(long, 4)
            self.assertGreaterEqual(long, int)
            self.assertGreaterEqual(double, float)

        def test_permute(self):
            orig = [1, 2, 3, 4, 5, 6, 7]
            perm = torch.randperm(7).tolist()
            x = torch.empty(*orig).fill_(0)
            new = [i - 1 for i in x.permute(*perm).size()]
            self.assertEqual(perm, new)
            self.assertEqual(x.size(), orig)

        def test_reversed(self):
            val = torch.arange(0, 10)
            self.assertEqual(reversed(val), torch.arange(9, -1, -1))

            val = torch.arange(1, 10).view(3, 3)
            self.assertEqual(reversed(val), torch.tensor([[7, 8, 9], [4, 5, 6], [1, 2, 3]]))

            val = torch.tensor(42)
            self.assertEqual(reversed(val), torch.tensor(42))

        def test_contains(self):
            x = torch.arange(0, 10)
            self.assertEqual(4 in x, True)
            self.assertEqual(12 in x, False)

            x = torch.arange(1, 10).view(3, 3)
            val = torch.arange(1, 4)
            self.assertEqual(val in x, True)
            val += 10
            self.assertEqual(val in x, False)

            self.assertRaisesRegex(
                RuntimeError,
                "Tensor.__contains__ only supports Tensor or scalar, but you passed in a {}.".format(type("foo")),
                lambda: "foo" in x)
            self.assertRaisesRegex(
                RuntimeError,
                "Tensor.__contains__ only supports Tensor or scalar, but you passed in a {}.".format(type([1, 2])),
                lambda: [1, 2] in x)

        def test_deepcopy_parameter(self):
            from copy import deepcopy
            l = torch.nn.Linear(10, 1)
            s = l.state_dict(keep_vars=True)
            self.assertEqual(torch.nn.Parameter, type(s['weight']))
            self.assertEqual(torch.nn.Parameter, type(s['bias']))

            s2 = deepcopy(s)
            self.assertEqual(torch.nn.Parameter, type(s2['weight']))
            self.assertEqual(torch.nn.Parameter, type(s2['bias']))

        def test_pickle(self):
            import pickle
            a = torch.randn(5, 5)
            serialized = pickle.dumps(a)
            b = pickle.loads(serialized)
            self.assertEqual(a, b)

        def test_pickle_parameter(self):
            import pickle
            a = torch.nn.Parameter(torch.randn(5, 5))
            serialized = pickle.dumps(a)
            b = pickle.loads(serialized)
            self.assertTrue(isinstance(b, torch.nn.Parameter))
            self.assertEqual(a.requires_grad, b.requires_grad)
            self.assertEqual(a, b)

        def test_pickle_parameter_no_requires_grad(self):
            import pickle
            a = torch.nn.Parameter(torch.randn(5, 5), requires_grad=False)
            serialized = pickle.dumps(a)
            b = pickle.loads(serialized)
            self.assertTrue(isinstance(b, torch.nn.Parameter))
            self.assertEqual(a.requires_grad, b.requires_grad)
            self.assertEqual(a, b)

        def test_pickle_dtype(self):
            t = torch.float32
            serialized = pickle.dumps(t)
            b = pickle.loads(serialized)
            self.assertTrue(isinstance(b, torch.dtype))
            self.assertEqual(id(b), id(t))

        def test_pickle_size(self):
            a = torch.rand(10).size()
            serialized = pickle.dumps(a)
            b = pickle.loads(serialized)
            self.assertTrue(isinstance(b, torch.Size))
            self.assertEqual(a, b)

        def test_pickle_function(self):
            # https://github.com/pytorch/pytorch/issues/37703
            a = torch.tanh
            serialized = 
pickle.dumps(a) b = pickle.loads(serialized) self.assertEqual(a, b) def test_generator_cpu(self): # test default generators are equal self.assertEqual(torch.default_generator, torch.default_generator) # tests Generator API # manual_seed, seed, initial_seed, get_state, set_state g1 = torch.Generator() g2 = torch.Generator() g1.manual_seed(12345) g2.manual_seed(12345) self.assertEqual(g1.initial_seed(), g2.initial_seed()) g1.seed() g2.seed() self.assertNotEqual(g1.initial_seed(), g2.initial_seed()) g1 = torch.Generator() g2_state = g2.get_state() g2_randn = torch.randn(1, generator=g2) g1.set_state(g2_state) g1_randn = torch.randn(1, generator=g1) self.assertEqual(g1_randn, g2_randn) default_state = torch.default_generator.get_state() q = torch.empty(100) g1_normal = q.normal_() g2 = torch.Generator() g2.set_state(default_state) g2_normal = q.normal_(generator=g2) self.assertEqual(g1_normal, g2_normal) def test_invalid_generator_raises(self): self.assertRaises(RuntimeError, lambda: torch.Generator('opengl')) def _sobol_reference_samples(self, scramble: bool) -> torch.Tensor: if not scramble: # theoretical values from Joe Kuo 2010 return torch.tensor( [ [0., 0.], [0.5, 0.5], [0.75, 0.25], [0.25, 0.75], [0.375, 0.375], [0.875, 0.875], [0.625, 0.125], [0.125, 0.625], ], ) else: # theoretical values unknown: convergence properties checked return torch.tensor( [ [0.50860737, 0.29320504], [0.07116939, 0.89594537], [0.49354145, 0.11524881], [0.93097717, 0.70244044], [0.87266153, 0.23887917], [0.31021884, 0.57600391], [0.13687253, 0.42054182], [0.69931293, 0.77336788], ], ) def test_sobolengine_bounds(self, scramble: bool = False): engine = torch.quasirandom.SobolEngine(100, scramble=scramble, seed=123456) sample = engine.draw(512) self.assertTrue(torch.all(sample >= 0)) self.assertTrue(torch.all(sample <= 1)) def test_sobolengine_bounds_scrambled(self): self.test_sobolengine_bounds(scramble=True) def test_sobolengine_draw(self, scramble: bool = False): ref_sample = self._sobol_reference_samples(scramble=scramble) engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456) sample = engine.draw(n=len(ref_sample)) self.assertEqual(sample, ref_sample) self.assertEqual(engine.num_generated, len(ref_sample)) def test_sobolengine_draw_scrambled(self): self.test_sobolengine_draw(scramble=True) def test_sobolengine_first_point(self): for dtype in (torch.float, torch.double): engine = torch.quasirandom.SobolEngine(2, scramble=False) sample = engine.draw(1, dtype=dtype) self.assertTrue(torch.all(sample == 0)) self.assertEqual(sample.dtype, dtype) for dtype in (torch.float, torch.double): engine = torch.quasirandom.SobolEngine(2, scramble=True, seed=123456) sample = engine.draw(1, dtype=dtype) self.assertTrue(torch.all(sample != 0)) self.assertEqual(sample.dtype, dtype) def test_sobolengine_continuing(self, scramble: bool = False): ref_sample = self._sobol_reference_samples(scramble=scramble) engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456) n_half = len(ref_sample) // 2 _ = engine.draw(n=n_half) sample = engine.draw(n=n_half) torch.testing.assert_allclose(sample, ref_sample[n_half:]) def test_sobolengine_continuing_scrambled(self): self.test_sobolengine_continuing(scramble=True) def test_sobolengine_reset(self, scramble: bool = False): ref_sample = self._sobol_reference_samples(scramble=scramble) engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456) _ = engine.draw(n=len(ref_sample) // 2) engine.reset() self.assertEqual(engine.num_generated, 0) sample 
            sample = engine.draw(n=len(ref_sample))
            torch.testing.assert_allclose(sample, ref_sample)

        def test_sobolengine_reset_scrambled(self):
            self.test_sobolengine_reset(scramble=True)

        def test_sobolengine_fast_forward(self, scramble: bool = False):
            ref_sample = self._sobol_reference_samples(scramble=scramble)
            engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
            engine.fast_forward(4)
            sample = engine.draw(n=4)
            torch.testing.assert_allclose(sample, ref_sample[4:])
            # alternate fast forwarding with sampling
            engine.reset()
            even_draws = []
            for i in range(8):
                if i % 2 == 0:
                    even_draws.append(engine.draw())
                else:
                    engine.fast_forward(1)
            torch.testing.assert_allclose(
                ref_sample[[i for i in range(8) if i % 2 == 0]],
                np.concatenate(even_draws),
            )

        def test_sobolengine_fast_forward_scrambled(self):
            self.test_sobolengine_fast_forward(scramble=True)

        def test_sobolengine_distribution(self, scramble=False):
            d = 50
            engine = torch.quasirandom.SobolEngine(d, scramble=scramble, seed=123456)
            sample = engine.draw(1024)
            torch.testing.assert_allclose(
                torch.mean(sample, dim=0), torch.full((d,), 0.5), atol=2, rtol=2
            )
            torch.testing.assert_allclose(
                np.percentile(sample, 25, axis=0), np.repeat(0.25, d), atol=2, rtol=2
            )
            torch.testing.assert_allclose(
                np.percentile(sample, 75, axis=0), np.repeat(0.75, d), atol=2, rtol=2
            )

        def test_sobolengine_distribution_scrambled(self):
            self.test_sobolengine_distribution(scramble=True)

        def test_sobolengine_draw_base2(self, scramble=False):
            ref_sample = self._sobol_reference_samples(scramble=scramble)
            engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
            sample = engine.draw_base2(2)
            self.assertEqual(ref_sample[:4], sample)
            # resampling still having N=2**n
            sample = engine.draw_base2(2)
            self.assertEqual(ref_sample[4:8], sample)

        def test_sobolengine_draw_base2_scrambled(self):
            self.test_sobolengine_draw_base2(scramble=True)

        def test_sobolengine_raise(self):
            maxdim = torch.quasirandom.SobolEngine.MAXDIM
            with self.assertRaises(ValueError):
                torch.quasirandom.SobolEngine(maxdim + 1)

        def test_sobolengine_high_dim(self):
            engine = torch.quasirandom.SobolEngine(1111, scramble=False, seed=123456)
            samples1 = engine.draw()
            vals1, counts1 = torch.unique(samples1, return_counts=True)
            samples2 = engine.draw()
            vals2, counts2 = torch.unique(samples2, return_counts=True)
            self.assertEqual(vals1.item(), 0.0)
            self.assertEqual(counts1.item(), 1111)
            self.assertEqual(vals2.item(), 0.5)
            self.assertEqual(counts2.item(), 1111)

        def test_parsing_int64(self):
            # accepts integer arguments
            x = torch.cumsum(torch.ones(5, 5), 0)
            self.assertEqual(x, torch.cumsum(torch.ones(5, 5), torch.tensor(0)))
            # doesn't accept floating point variables
            self.assertRaises(TypeError, lambda: torch.cumsum(torch.ones(5, 5), torch.tensor(0.)))

        def test_parsing_double(self):
            # accepts floating point and integer arguments
            x = torch.randn(2, 3)
            torch.isclose(x, x, 1, 1)
            self.assertTrue(torch.isclose(x, x, 1, 1).all())
            self.assertTrue(torch.isclose(x, x, 1.5, 1.).all())
            # accepts floating point and integer tensors
            self.assertTrue(torch.isclose(x, x, torch.tensor(1), torch.tensor(1)).all())
            self.assertTrue(torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1.)).all())
            # doesn't accept variables with requires_grad
            self.assertRaises(TypeError,
                              lambda: torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1., requires_grad=True)).all())

        def test_parsing_intlist(self):
            # parse with integer variables
            self.assertEqual(torch.Size([3, 4]),
                             torch.ones((torch.tensor(3), torch.tensor(4))).shape)
            self.assertEqual(torch.Size([3, 4]),
                             torch.ones(torch.tensor(3), torch.tensor(4)).shape)
            # parse with numpy integers
            self.assertEqual(torch.Size([3, 4]),
                             torch.ones((np.array(3), np.int64(4))).shape)
            self.assertEqual(torch.Size([3, 4]),
                             torch.ones(np.array(3), np.int64(4)).shape)
            self.assertEqual(torch.Size([3, 4]),
                             torch.ones((np.int64(3), np.array(4))).shape)
            self.assertEqual(torch.Size([3, 4]),
                             torch.ones(np.int64(3), np.array(4)).shape)

            # fail parse with float variables
            self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3.), torch.tensor(4))))
            # fail parse with numpy floats
            self.assertRaises(TypeError, lambda: torch.ones((np.float64(3.), torch.tensor(4))))
            self.assertRaises(TypeError, lambda: torch.ones((np.array(3.), torch.tensor(4))))

            # fail parse with > 1 element variables
            self.assertRaises(TypeError, lambda: torch.ones(torch.tensor(3, 3)))
            self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3, 3))))
            self.assertRaises(TypeError, lambda: torch.ones(np.array(3, 3)))
            self.assertRaises(TypeError, lambda: torch.ones((np.array(3, 3))))

            # fail parse with additional positional args after intlist arg
            self.assertRaisesRegex(TypeError,
                                   "received an invalid combination of arguments",
                                   lambda: torch.LongTensor((6, 0), 1, 1, 0))
            self.assertRaisesRegex(TypeError,
                                   "missing 1 required positional arguments",
                                   lambda: torch.tensor().new_zeros((5, 5), 0))

        def test_half_tensor(self):
            devices = ["cpu"]
            if torch.cuda.is_available():
                devices.append("cuda")

            # contiguous tensor
            # non-contiguous tensor
            # dense non-overlapping tensor
            # non-dense non-overlapping sliced tensor
            # non-dense overlapping equal strides
            for device in devices:
                tset = (
                    torch.randn(4, 3, 2, device=device, dtype=torch.float).contiguous(),
                    torch.randn(4, 3, 2, device=device, dtype=torch.float).transpose(0, 1),
                    torch.randn(4, 3, 2, device=device, dtype=torch.float),
                    torch.randn(4, 3, 2, device=device, dtype=torch.float)[:, :, ::2],
                    torch.empty_strided(
                        (4, 2, 3), (10, 3, 3), device=device, dtype=torch.float
                    ).copy_(torch.rand((4, 2, 3), dtype=torch.float, device=device)),
                )

                for x in tset:
                    self.assertEqual(x.half().float(), x, atol=1e-3, rtol=0)
                    xh = x.half()
                    with tempfile.NamedTemporaryFile() as f:
                        torch.save(xh, f)
                        f.seek(0)
                        xh2 = torch.load(f)
                        self.assertEqual(xh.float(), xh2.float())

        def test_from_buffer(self):
            a = bytearray([1, 2, 3, 4])
            self.assertEqual(torch.ByteStorage.from_buffer(a).tolist(), [1, 2, 3, 4])
            shorts = torch.ShortStorage.from_buffer(a, 'big')
            self.assertEqual(shorts.size(), 2)
            self.assertEqual(shorts.tolist(), [258, 772])
            ints = torch.IntStorage.from_buffer(a, 'little')
            self.assertEqual(ints.size(), 1)
            self.assertEqual(ints[0], 67305985)
            f = bytearray([0x40, 0x10, 0x00, 0x00])
            floats = torch.FloatStorage.from_buffer(f, 'big')
            self.assertEqual(floats.size(), 1)
            self.assertEqual(floats[0], 2.25)

            f = bytearray([0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x40])
            bools = torch.BoolStorage.from_buffer(f, 'big')
            self.assertEqual(bools.size(), 8)
            self.assertEqual(bools.tolist(), [False, True, True, True, True, True, True, True])
            self.assertEqual(bools.type(), 'torch.BoolStorage')

            f = bytearray(b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9')
            bools = torch.BoolStorage.from_buffer(f, 'big')
            self.assertEqual(bools.size(), 19)

            f = bytearray(b'\0x4A')
            bools = torch.BoolStorage.from_buffer(f, 'big')
            self.assertEqual(bools.size(), 4)
            self.assertEqual(bools.tolist(), [False, True, True, True])

        def test_storage_casts(self):
            storage = torch.IntStorage([-1, 0, 1, 2, 3, 4])
            self.assertEqual(storage.size(), 6)
            self.assertEqual(storage.tolist(),
[-1, 0, 1, 2, 3, 4]) self.assertEqual(storage.type(), 'torch.IntStorage') self.assertIs(storage.dtype, torch.int32) floatStorage = storage.float() self.assertEqual(floatStorage.size(), 6) self.assertEqual(floatStorage.tolist(), [-1, 0, 1, 2, 3, 4]) self.assertEqual(floatStorage.type(), 'torch.FloatStorage') self.assertEqual(floatStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(floatStorage.dtype, torch.float32) halfStorage = storage.half() self.assertEqual(halfStorage.size(), 6) self.assertEqual(halfStorage.tolist(), [-1, 0, 1, 2, 3, 4]) self.assertEqual(halfStorage.type(), 'torch.HalfStorage') self.assertEqual(halfStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(halfStorage.dtype, torch.float16) bfloat16Storage = storage.bfloat16() self.assertEqual(bfloat16Storage.size(), 6) self.assertEqual(bfloat16Storage.tolist(), [-1, 0, 1, 2, 3, 4]) self.assertEqual(bfloat16Storage.type(), 'torch.BFloat16Storage') self.assertEqual(bfloat16Storage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(bfloat16Storage.dtype, torch.bfloat16) longStorage = storage.long() self.assertEqual(longStorage.size(), 6) self.assertEqual(longStorage.tolist(), [-1, 0, 1, 2, 3, 4]) self.assertEqual(longStorage.type(), 'torch.LongStorage') self.assertEqual(longStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(longStorage.dtype, torch.int64) shortStorage = storage.short() self.assertEqual(shortStorage.size(), 6) self.assertEqual(shortStorage.tolist(), [-1, 0, 1, 2, 3, 4]) self.assertEqual(shortStorage.type(), 'torch.ShortStorage') self.assertEqual(shortStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(shortStorage.dtype, torch.int16) doubleStorage = storage.double() self.assertEqual(doubleStorage.size(), 6) self.assertEqual(doubleStorage.tolist(), [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0]) self.assertEqual(doubleStorage.type(), 'torch.DoubleStorage') self.assertEqual(doubleStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(doubleStorage.dtype, torch.float64) charStorage = storage.char() self.assertEqual(charStorage.size(), 6) self.assertEqual(charStorage.tolist(), [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0]) self.assertEqual(charStorage.type(), 'torch.CharStorage') self.assertEqual(charStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(charStorage.dtype, torch.int8) byteStorage = storage.byte() self.assertEqual(byteStorage.size(), 6) self.assertEqual(byteStorage.tolist(), [255, 0, 1, 2, 3, 4]) self.assertEqual(byteStorage.type(), 'torch.ByteStorage') self.assertEqual(byteStorage.int().tolist(), [255, 0, 1, 2, 3, 4]) self.assertIs(byteStorage.dtype, torch.uint8) boolStorage = storage.bool() self.assertEqual(boolStorage.size(), 6) self.assertEqual(boolStorage.tolist(), [True, False, True, True, True, True]) self.assertEqual(boolStorage.type(), 'torch.BoolStorage') self.assertEqual(boolStorage.int().tolist(), [1, 0, 1, 1, 1, 1]) self.assertIs(boolStorage.dtype, torch.bool) complexfloat_storage = torch.ComplexFloatStorage([-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j]) self.assertEqual(complexfloat_storage.size(), 6) self.assertEqual(complexfloat_storage.tolist(), [-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j]) self.assertEqual(complexfloat_storage.type(), 'torch.ComplexFloatStorage') self.assertIs(complexfloat_storage.dtype, torch.complex64) complexdouble_storage = complexfloat_storage.complex_double() self.assertEqual(complexdouble_storage.size(), 6) self.assertEqual(complexdouble_storage.tolist(), [-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j]) self.assertEqual(complexdouble_storage.type(), 'torch.ComplexDoubleStorage') 
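            # (Sketch, not part of the assertions: casting a Storage always
            # allocates a new buffer and copies, e.g.
            #     s = torch.IntStorage([1, 2])
            #     f = s.float()   # new torch.FloatStorage; mutating f leaves s intact
            # so none of the casts above alias `storage`.)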
self.assertIs(complexdouble_storage.dtype, torch.complex128) def test_from_file(self): def assert_with_filename(filename): size = 10000 s1 = torch.FloatStorage.from_file(filename, True, size) t1 = torch.FloatTensor(s1).copy_(torch.randn(size)) # check mapping s2 = torch.FloatStorage.from_file(filename, True, size) t2 = torch.FloatTensor(s2) self.assertEqual(t1, t2, atol=0, rtol=0) # check changes to t1 from t2 rnum = random.uniform(-1, 1) t1.fill_(rnum) self.assertEqual(t1, t2, atol=0, rtol=0) # check changes to t2 from t1 rnum = random.uniform(-1, 1) t2.fill_(rnum) self.assertEqual(t1, t2, atol=0, rtol=0) # release the tensors del s1, t1, s2, t2 with TemporaryFileName() as fname: assert_with_filename(fname) if IS_FILESYSTEM_UTF8_ENCODING: with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname: assert_with_filename(fname) def test_torch_from_file(self): def assert_with_filename(filename): size = 10000 s1 = torch.from_file(filename, True, size, dtype=torch.float) t1 = torch.FloatTensor(s1).copy_(torch.randn(size)) # check mapping s2 = torch.from_file(filename, True, size, dtype=torch.float) t2 = torch.FloatTensor(s2) self.assertEqual(t1, t2, atol=0, rtol=0) # check changes to t1 from t2 rnum = random.uniform(-1, 1) t1.fill_(rnum) self.assertEqual(t1, t2, atol=0, rtol=0) # check changes to t2 from t1 rnum = random.uniform(-1, 1) t2.fill_(rnum) self.assertEqual(t1, t2, atol=0, rtol=0) # release the tensors del s1, t1, s2, t2 with TemporaryFileName() as fname: assert_with_filename(fname) if IS_FILESYSTEM_UTF8_ENCODING: with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname: assert_with_filename(fname) def test_print(self): default_type = torch.tensor([]).type() for t in torch._tensor_classes: if t == torch.HalfTensor: continue # HalfTensor does not support fill if t.is_sparse: continue if t.is_cuda and not torch.cuda.is_available(): continue obj = t(100, 100).fill_(1) obj.__repr__() str(obj) # test half tensor obj = torch.rand(100, 100, device='cpu').half() obj.__repr__() str(obj) for t in torch._storage_classes: if t == torch.BFloat16Storage: continue # Fix once fill is enabled for bfloat16 if t.is_cuda and not torch.cuda.is_available(): continue if t == torch.BoolStorage or t == torch.cuda.BoolStorage: obj = t(100).fill_(True) else: obj = t(100).fill_(1) obj.__repr__() str(obj) # test complex tensor # complex tensor print uses two formatters, one for real values # and the other for imag values. 
this is consistent with numpy x = torch.tensor([2.3 + 4j, 7 + 6j]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([2.3000+4.j, 7.0000+6.j])''') # test scientific notation for complex tensors x = torch.tensor([1e28 + 2j , -1e-28j]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.0000e+28+2.0000e+00j, -0.0000e+00-1.0000e-28j])''') # test big integer x = torch.tensor(2341234123412341) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor(2341234123412341)''') # test scientific notation x = torch.tensor([1e28, 1e-28]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.0000e+28, 1.0000e-28])''') # test scientific notation using set_printoptions x = torch.tensor([1e2, 1e-2]) torch.set_printoptions(sci_mode=True) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.0000e+02, 1.0000e-02])''') torch.set_printoptions(sci_mode=False) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([ 100.0000, 0.0100])''') torch.set_printoptions(sci_mode=None) # reset to the default value # test no leading space if all elements positive x = torch.tensor([1, 2]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1, 2])''') # test for leading space if there are negative elements x = torch.tensor([1, -2]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([ 1, -2])''') # test inf and nan x = torch.tensor([4, inf, 1.5, -inf, 0, nan, 1]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([4.0000, inf, 1.5000, -inf, 0.0000, nan, 1.0000])''') y = torch.tensor([4, inf, complex(1.5, inf), complex(-inf, 4), 0, complex(nan, inf), complex(3, nan)]) self.assertEqual(y.__repr__(), str(y)) expected_str = '''\ tensor([4.0000+0.j, inf+0.j, 1.5000+infj, -inf+4.j, 0.0000+0.j, nan+infj, 3.0000+nanj])''' self.assertExpectedInline(str(y), expected_str) # test dtype torch.set_default_dtype(torch.float) x = torch.tensor([1e-324, 1e-323, 1e-322, 1e307, 1e308, 1e309], dtype=torch.float64) self.assertEqual(x.__repr__(), str(x)) expected_str = '''\ tensor([ 0.0000e+00, 9.8813e-324, 9.8813e-323, 1.0000e+307, 1.0000e+308, inf], dtype=torch.float64)''' self.assertExpectedInline(str(x), expected_str) # test changing default dtype torch.set_default_dtype(torch.float64) self.assertEqual(x.__repr__(), str(x)) expected_str = '''\ tensor([ 0.0000e+00, 9.8813e-324, 9.8813e-323, 1.0000e+307, 1.0000e+308, inf])''' self.assertExpectedInline(str(x), expected_str) # test summary x = torch.zeros(10000) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([0., 0., 0., ..., 0., 0., 0.])''') # test internal summary function x = torch.rand(1, 20, 5, 30) summary = torch._tensor_str.get_summarized_data(x) self.assertEqual(summary.shape, (1, 6, 5, 6)) first_and_last = [0, 1, 2, -3, -2, -1] self.assertEqual(summary, x[:, first_and_last][..., first_and_last]) # test device if torch.cuda.is_available(): x = torch.tensor([123], device='cuda:0') self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([123], device='cuda:0')''') # test changing default to cuda torch.set_default_tensor_type(torch.cuda.FloatTensor) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([123])''') # test printing a tensor on a different gpu than current one. 
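                # (Sketch of the behavior exercised next, hedged: repr is
                # expected to carry a device suffix whenever the tensor's
                # device differs from the current default, e.g.
                #     tensor([123], device='cuda:0')  # printed while cuda:1 is current
                # )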
if torch.cuda.device_count() >= 2: with torch.cuda.device(1): self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([123], device='cuda:0')''') # test printing cpu tensor when default device is cuda y = torch.tensor([123], device='cpu') self.assertEqual(y.__repr__(), str(y)) self.assertExpectedInline(str(y), '''tensor([123], device='cpu')''') torch.set_default_tensor_type(default_type) # test integral floats and requires_grad x = torch.tensor([123.], requires_grad=True) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([123.], requires_grad=True)''') # test non-contiguous print # sliced tensor should have > PRINT_OPTS.threshold elements x = torch.ones(100, 2, 2, 10) y = x.as_strided(size=(100, 2, 10), stride=(2 * 2 * 10, 2 * 10, 1)) self.assertEqual(str(y), y.__repr__()) expected_str = '''\ tensor([[[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]], [[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]], [[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]], ..., [[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]], [[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]], [[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]]])\ ''' self.assertExpectedInline(str(y), expected_str) x = torch.ones(100, 2, 2, 10) * (1 + 1j) y = x.as_strided(size=(100, 2, 10), stride=(2 * 2 * 10, 2 * 10, 1)) self.assertEqual(str(y), y.__repr__()) expected_str = '''\ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]], [[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]], [[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]], ..., [[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]], [[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]], [[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]]])\ ''' self.assertExpectedInline(str(y), expected_str) # test print 0-dim tensor: there's no 0-dim in Numpy, we match arrayprint style x = torch.tensor(0.00002) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor(2.0000e-05)''') # test print boolean tensor x = torch.tensor([True]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([True])''') x = torch.tensor(True) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor(True)''') # [Numpy] test print float in sci_mode when min < 0.0001. x = torch.tensor([0.00002]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([2.0000e-05])''') # [Numpy] test print complex in sci_mode when real_min < 0.0001 and (or) imag_min < 0.0001. x = torch.tensor([0.00002]) * (1 + 1j) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([2.0000e-05+2.0000e-05j])''') # [Numpy] test print float in sci_mode when max > 1e8. # TODO: Pytorch uses fixed precision to print, while Numpy uses dragon4_scientific # to do automatic trimming and padding. x = torch.tensor([123456789.]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.2346e+08])''') # [Numpy] test print float in sci_mode when max / min > 1000. 
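            # (Rough reading of the formatter heuristic exercised by the
            # [Numpy] cases here, hedged: fixed-point printing is used unless
            # max(abs) > 1e8, min(abs) < 1e-4, or max/min > 1000; see
            # torch._tensor_str._Formatter for the authoritative rule.)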
x = torch.tensor([0.01, 11]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.0000e-02, 1.1000e+01])''') # [Numpy] test print int max / min > 1000, no sci_mode x = torch.tensor([1, 1010]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([ 1, 1010])''') # [Numpy] test print int > 1e8, no sci_mode x = torch.tensor([1000000000]) # 1e9 self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1000000000])''') # [Numpy] test printing float in int_mode x = torch.tensor([1., 1000.]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([ 1., 1000.])''') # [Numpy] test printing float in int_mode in sci format when max / min > 1000. x = torch.tensor([1., 1010.]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.0000e+00, 1.0100e+03])''') def test_sizeof(self) -> None: sizeof_empty = torch.randn(0).storage().__sizeof__() sizeof_10 = torch.randn(10).storage().__sizeof__() sizeof_100 = torch.randn(100).storage().__sizeof__() self.assertEqual((sizeof_100 - sizeof_empty) // (sizeof_10 - sizeof_empty), 10) self.assertEqual((sizeof_100 - sizeof_empty) % (sizeof_10 - sizeof_empty), 0) sizeof_empty = torch.randn(0).to(torch.uint8).storage().__sizeof__() sizeof_10 = torch.randn(10).to(torch.uint8).storage().__sizeof__() sizeof_100 = torch.randn(100).to(torch.uint8).storage().__sizeof__() self.assertEqual((sizeof_100 - sizeof_empty) // (sizeof_10 - sizeof_empty), 10) self.assertEqual((sizeof_100 - sizeof_empty) % (sizeof_10 - sizeof_empty), 0) def test_iter(self) -> None: x = torch.randn(5, 5) for i, sub in enumerate(x): self.assertEqual(sub, x[i]) x = torch.tensor([]) self.assertEqual(list(x), []) def test_assertEqual(self) -> None: x = torch.FloatTensor([0]) self.assertEqual(x, 0) xv = torch.autograd.Variable(x) self.assertEqual(xv, 0) self.assertEqual(x, xv) self.assertEqual(xv, x) # Tests that setting atol or rtol without the other throws self.assertRaises(AssertionError, lambda: self.assertEqual(x, xv, atol=4)) self.assertRaises(AssertionError, lambda: self.assertEqual(x, xv, rtol=4)) self.assertRaisesRegex(TypeError, "takes from 3 to 4 positional arguments", lambda: self.assertEqual(x, xv, "", 1.0)) # type: ignore[misc] def test_new(self) -> None: x = torch.autograd.Variable(torch.tensor([])) y = torch.autograd.Variable(torch.randn(4, 4)) z = torch.autograd.Variable(torch.IntTensor([1, 2, 3])) self.assertEqual(x.new().shape, [0]) self.assertEqual(x.new(), x) self.assertEqual(x.new(1, 2).shape, [1, 2]) self.assertEqual(x.new(torch.Size([3, 4])).shape, [3, 4]) self.assertEqual(x.new([3, 4]).shape, [2]) self.assertEqual(x.new([3, 4]).tolist(), [3, 4]) self.assertEqual(x.new((3, 4)).tolist(), [3, 4]) self.assertEqual(x.new([np.int32(3), np.float64(4)]).tolist(), [3, 4]) self.assertEqual(x.new(np.array((3, 4))).tolist(), [3, 4]) self.assertEqual(x.new([z[2], z[0] + 3]).tolist(), [3, 4]) self.assertEqual(x.new(size=(3, 4)).shape, [3, 4]) self.assertEqual(x.new(()).shape, [0]) self.assertEqual(x.new(y.storage()).data_ptr(), y.data_ptr()) self.assertEqual(x.new(y).data_ptr(), y.data_ptr()) self.assertIsNot(x.new(y), y) self.assertRaises(TypeError, lambda: x.new(z)) # TypeError would be better self.assertRaises(RuntimeError, lambda: x.new(z.storage())) @unittest.skipIf(PYTORCH_CUDA_MEMCHECK, "is_pinned uses failure to detect pointer property") def test_pin_memory(self): x = torch.randn(3, 5) self.assertFalse(x.is_pinned()) if not 
torch.cuda.is_available(): self.assertRaises(RuntimeError, lambda: x.pin_memory()) else: pinned = x.pin_memory() self.assertTrue(pinned.is_pinned()) self.assertEqual(pinned, x) self.assertNotEqual(pinned.data_ptr(), x.data_ptr()) # test that pin_memory on already pinned tensor has no effect self.assertIs(pinned, pinned.pin_memory()) self.assertEqual(pinned.data_ptr(), pinned.pin_memory().data_ptr()) def test_error_msg_type_translation(self): with self.assertRaisesRegex( RuntimeError, # message includes both Double and Long '(?=.*Double)(?=.*Long)'): # Calls model with a LongTensor input but DoubleTensor weights input = torch.zeros(1, 1, 1, 6, dtype=torch.long) weight = torch.nn.Parameter(torch.zeros(1, 1, 1, 3, dtype=torch.double)) model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False) model.weight = weight out = model(input) def test_apply(self): x = torch.arange(1, 6) res = x.clone().apply_(lambda k: k + k) self.assertEqual(res, x * 2) self.assertRaises(TypeError, lambda: x.apply_(lambda k: "str")) def test_map(self): x = torch.autograd.Variable(torch.randn(3, 3)) y = torch.autograd.Variable(torch.randn(3)) res = x.clone() res.map_(y, lambda a, b: a + b) self.assertEqual(res, x + y) self.assertRaisesRegex(TypeError, "not callable", lambda: res.map_(y, "str")) def test_map2(self): x = torch.autograd.Variable(torch.randn(3, 3)) y = torch.autograd.Variable(torch.randn(3)) z = torch.autograd.Variable(torch.randn(1, 3)) res = x.clone() res.map2_(y, z, lambda a, b, c: a + b * c) self.assertEqual(res, x + y * z) z.requires_grad = True self.assertRaisesRegex( RuntimeError, "requires grad", lambda: res.map2_(y, z, lambda a, b, c: a + b * c)) def test_Size(self): x = torch.Size([1, 2, 3]) self.assertIsInstance(x, tuple) self.assertEqual(x[0], 1) self.assertEqual(x[1], 2) self.assertEqual(x[2], 3) self.assertEqual(len(x), 3) self.assertRaises(TypeError, lambda: torch.Size(torch.ones(3))) self.assertIsInstance(x * 2, torch.Size) self.assertIsInstance(x[:-1], torch.Size) self.assertIsInstance(x + x, torch.Size) def test_Size_scalar(self): three = torch.tensor(3) two = torch.tensor(2) x = torch.Size([0, 1, two, three, 4]) for i in range(1, 5): self.assertEqual(x[i], i) def test_Size_iter(self): for sizes in [iter([1, 2, 3, 4, 5]), range(1, 6)]: x = torch.Size(sizes) for i in range(0, 5): self.assertEqual(x[i], i + 1) def test_t_not_2d_error(self): self.assertRaises(RuntimeError, lambda: torch.randn(2, 3, 4).t()) self.assertRaises(RuntimeError, lambda: torch.randn(2, 3, 4).t_()) # skip this test for now as it affects all tests @unittest.skipIf(True, "flush_denormal not supported") def test_set_flush_denormal(self): tiny_float = 1e-42 tiny_double = 1e-320 float_tensor = torch.FloatTensor([1.0, tiny_float]) double_tensor = torch.DoubleTensor([1.0, tiny_float, tiny_double]) self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0) self.assertEqual(float_tensor[1], tiny_float, atol=tiny_float / 16, rtol=0) self.assertEqual(double_tensor[0], 1.0, atol=0.0, rtol=0) self.assertEqual(double_tensor[1], tiny_float, atol=0.0, rtol=0) self.assertEqual(double_tensor[2], tiny_double, atol=0.0, rtol=0) torch.set_flush_denormal(True) self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0) self.assertEqual(float_tensor[1], 0.0, atol=0.0, rtol=0) # tiny_float to zero self.assertEqual(double_tensor[0], 1.0, atol=0.0, rtol=0) # tiny_float is not converted to zero in double type self.assertEqual(double_tensor[1], tiny_float, atol=0.0, rtol=0) self.assertEqual(double_tensor[2], 0.0, atol=0.0, rtol=0) # 
tiny_double to zero torch.set_flush_denormal(False) def test_show_config(self): # We can't usefully test the output; just make sure this doesn't crash torch.__config__.show() @unittest.skipIf(IS_FBCODE, "CXX_FLAGS is only for OSS build.") def test_cxx_flags(self): torch.__config__._cxx_flags() def test_parallel_info(self): torch.__config__.parallel_info() @slowTest def test_slow_test(self): # Just a smoketest to make sure our slowTest decorator works. pass def test_is_nonzero(self): with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"): torch.tensor([]).is_nonzero() with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"): torch.tensor([0, 0]).is_nonzero() self.assertFalse(torch.tensor(0).is_nonzero()) self.assertTrue(torch.tensor(1).is_nonzero()) self.assertFalse(torch.tensor([0]).is_nonzero()) self.assertTrue(torch.tensor([1]).is_nonzero()) self.assertFalse(torch.tensor([[0]]).is_nonzero()) self.assertTrue(torch.tensor([[1]]).is_nonzero()) self.assertTrue(torch.tensor(0.1).is_nonzero()) self.assertTrue(torch.tensor(-0.1).is_nonzero()) self.assertFalse(torch.tensor(0.0).is_nonzero()) self.assertTrue(torch.tensor(True).is_nonzero()) self.assertFalse(torch.tensor(False).is_nonzero()) self.assertFalse(torch.tensor(0 + 0j).is_nonzero()) self.assertTrue(torch.tensor(0 + 0.1j).is_nonzero()) def test_assert_async(self): with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"): torch._assert_async(torch.tensor([])) with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"): torch._assert_async(torch.tensor([0, 0])) with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"): torch._assert_async(torch.tensor(0)) torch._assert_async(torch.tensor(1)) torch._assert_async(torch.tensor(0.1)) torch._assert_async(torch.tensor(-0.1)) with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"): torch._assert_async(torch.tensor(0.0)) torch._assert_async(torch.tensor(True)) with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"): torch._assert_async(torch.tensor(False)) torch._assert_async(torch.tensor(0 + 0.1j)) with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"): torch._assert_async(torch.tensor(0 + 0j)) # NB: we must not be built with CUDA; if we are built with CUDA but no CUDA # is available, we get a different error. 
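        # (The distinction relied on here: torch.backends.cuda.is_built()
        # reports compile-time CUDA support, while torch.cuda.is_available()
        # reports whether a usable device exists at runtime; the raised error
        # differs between the two situations, hence the skip condition below.)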
@unittest.skipIf(torch.backends.cuda.is_built() or IS_SANDCASTLE, "CUDA is built, can't test CUDA not built error") def test_cuda_not_built(self): msg = "Torch not compiled with CUDA enabled" self.assertRaisesRegex(AssertionError, msg, lambda: torch.cuda.current_device()) self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1], device="cuda")) self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1]).cuda()) self.assertRaisesRegex(TypeError, msg, lambda: torch.cuda.FloatTensor()) self.assertRaisesRegex(TypeError, msg, lambda: torch.set_default_tensor_type(torch.cuda.FloatTensor)) self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1]).to(device="cuda")) def test_has_internal_overlap(self): OVERLAP_NO = 0 OVERLAP_YES = 1 OVERLAP_TOO_HARD = 2 # Check for contiguous tensors a = torch.randn(3, 3) self.assertEqual(torch._debug_has_internal_overlap(a), OVERLAP_NO) # Checks for zero strides b = torch.randn(1, 3) b_expanded = b.expand(4, 3) self.assertEqual(torch._debug_has_internal_overlap(b_expanded), OVERLAP_YES) # Check for zero strided, size 1 axis, in non-contiguous storage (gh-33812) c = torch.randn(10).as_strided([2, 1, 5], [1, 0, 2]) self.assertEqual(torch._debug_has_internal_overlap(c), OVERLAP_NO) c = torch.randn(2, 1, 10)[::2].as_strided((2, 1, 5), (10, 0, 2)) self.assertEqual(torch._debug_has_internal_overlap(c), OVERLAP_TOO_HARD) def test_allow_tensor_metadata_change(self): def do_test(t): with self.assertRaisesRegex( RuntimeError, "set_sizes_contiguous is not allowed on a Tensor created from .data or .detach()"): t.resize_((2, 1)) with self.assertRaisesRegex( RuntimeError, "set_storage is not allowed on a Tensor created from .data or .detach()"): t.set_() with self.assertRaisesRegex( RuntimeError, "set_storage_offset is not allowed on a Tensor created from .data or .detach()"): t.set_(t.storage(), 0, t.size(), list(t.stride())) do_test(torch.tensor([[1, 2]]).data) do_test(torch.tensor([[1, 2]]).detach()) def test_c10_layer_norm(self): # test that we can call c10 ops and they return a reasonable result X = torch.rand(5, 5, dtype=torch.float) weight = torch.rand(*X.size()[1:], dtype=torch.float) bias = torch.rand(*X.size()[1:], dtype=torch.float) epsilon = 1e-4 expected_norm = torch.nn.functional.layer_norm( X, X.size()[1:], weight=weight, bias=bias, eps=epsilon) actual_norm, actual_mean, actual_stdev = \ torch.ops._caffe2.LayerNorm(torch.tensor(X), torch.tensor( weight), torch.tensor(bias), 1, epsilon, True) torch.testing.assert_allclose(expected_norm, actual_norm) def test_memory_format(self): def test_helper(x, memory_format): y = x.contiguous(memory_format=memory_format) self.assertFalse(y.is_contiguous()) self.assertTrue(y.is_contiguous(memory_format=memory_format)) self.assertEqual(y, x) test_helper(torch.randn(4, 3, 8, 8), torch.channels_last) test_helper(torch.randn(4, 3, 8, 8, 8), torch.channels_last_3d) def test_memory_format_contiguous_returns_same_tensor_if_already_satisfies(self): def test_helper(x, memory_format): alias = x.contiguous(memory_format=memory_format) alias.fill_(7) self.assertEqual(x, alias) test_helper(torch.randn(4, 8, 8, 3).permute(0, 3, 1, 2), torch.channels_last) test_helper(torch.randn(4, 8, 8, 8, 3).permute(0, 4, 1, 2, 3), torch.channels_last_3d) def test_memory_format_empty(self): def test_helper(dim1, dim2, memory_format): with self.assertRaises(RuntimeError): x = torch.empty(dim1, memory_format=memory_format) x = torch.empty(dim2, memory_format=memory_format) 
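                # (Hedged note: torch.channels_last is only defined for 4-d
                # sizes and torch.channels_last_3d for 5-d sizes, which is why
                # the dim1 shapes are expected to raise, e.g.
                #     torch.empty(3, 3, memory_format=torch.channels_last)        # RuntimeError
                #     torch.empty(3, 3, 3, 3, memory_format=torch.channels_last)  # ok
                # )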
self.assertTrue(x.is_contiguous(memory_format=memory_format)) test_helper((3, 3), (3, 3, 3, 3), torch.channels_last) test_helper((3, 3, 3), (3, 3, 3, 3, 3), torch.channels_last_3d) def test_subclass_tensors(self): # raise an error when trying to subclass FloatTensor with self.assertRaisesRegex(TypeError, "type 'torch.FloatTensor' is not an acceptable base type"): class Foo1(torch.FloatTensor): pass # but allow subclassing Tensor: class Foo2(torch.Tensor): def foo(self): return 5 f = Foo2() self.assertEqual(f.foo(), 5) def test_ndim(self): a = torch.randn(1, 2, 3) self.assertEqual(3, a.ndim) b = torch.randn(()) self.assertEqual(0, b.ndim) c = torch.randn(1, 0) self.assertEqual(2, c.ndim) def test_fill_diagonal(self): a1 = torch.randn(7, 3) a2 = a1.clone() v = 1 for i in range(3): a2[i][i] = v a1.fill_diagonal_(v) self.assertEqual(a1, a2) b1 = torch.randn(7, 3) b2 = b1.clone() for i in range(3): b2[i][i] = v b2[i + 4][i] = v b1.fill_diagonal_(v, wrap=True) self.assertEqual(b1, b2) c1 = torch.rand(3, 3, 3) c2 = c1.clone() for i in range(3): c2[i][i][i] = v c1.fill_diagonal_(v) self.assertEqual(c1, c2) # non-contiguous tensor d1 = torch.rand(3, 3, 3)[:, 1, ...] d2 = d1.clone() for i in range(3): d2[i][i] = v d1.fill_diagonal_(v) self.assertEqual(d1, d2) e1 = torch.rand(7, 3, 3)[:, 1, ...] e2 = e1.clone() for i in range(3): e2[i][i] = v e2[i + 4][i] = v e1.fill_diagonal_(v, wrap=True) self.assertEqual(e1, e2) def test_batch_norm_cpu_inference(self): # input nchw in (2,1,1,1), (2,2,2,2) inputs = [ torch.tensor([[[[-0.5000]]], [[[0.5000]]]]), torch.tensor([ [ [[-0.5000, 0.5000], [-1.0000, 1.0000]], [[-0.2500, -0.5000], [0.2500, 0.5000]] ], [ [[0.1000, 1.0000], [1.0000, 0.1000]], [[1.0000, 0.5000], [1.5000, -1.5000]] ]])] # output nchw in (2,1,1,1), (2,2,2,2) outputs = [ torch.tensor([ [[[-0.499997496604919433593750000]]], [[[0.499997496604919433593750000]]]]), torch.tensor([ [[[-0.499997496604919433593750000, 0.499997496604919433593750000], [-0.999994993209838867187500000, 0.999994993209838867187500000]], [[-0.249998748302459716796875000, -0.499997496604919433593750000], [0.249998748302459716796875000, 0.499997496604919433593750000]]], [[[0.099999502301216125488281250, 0.999994993209838867187500000], [0.999994993209838867187500000, 0.099999502301216125488281250]], [[0.999994993209838867187500000, 0.499997496604919433593750000], [1.499992489814758300781250000, -1.499992489814758300781250000]]]])] for i in range(len(inputs)): for affine in [False, True]: m = torch.nn.BatchNorm2d(inputs[i].size()[1], 1e-05, 0.1, affine=affine) m.eval() # contiguous case input1 = inputs[i].contiguous() output1 = m(input1) # non-contiguous case input2 = input1.permute(0, 1, 3, 2) output2 = m(input2).permute(0, 1, 3, 2) # channels last case input3 = input1.contiguous(memory_format=torch.channels_last) output3 = m(input3) self.assertEqual(output3, outputs[i]) self.assertEqual(output3, output1) self.assertEqual(output3, output2) @noarchTest def test_empty_meta(self): x = torch.empty(2 ** 20, 2 ** 20, device='meta') y = torch.empty(2 ** 20, device='meta') z = x + y self.assertEqual(z.size(), (2 ** 20, 2 ** 20)) self.assertRaises(RuntimeError, lambda: z[0][0].item()) @noarchTest def test_upsample_nearest1d_meta(self): # TODO: this test should be triggered by test_nn.py but right # now meta is not enabled (and even if it was, we are probably # missing too many meta functions to get through the test unmolested) # NB: Can't make the exponent too big, or it will overflow # signed 64-bit integer x = torch.empty(2 * 10 ** 8, 3, 
2 * 10 ** 8, device='meta') z = torch.nn.functional.interpolate(x, scale_factor=2) self.assertEqual(z.size(), (2 * 10 ** 8, 3, 4 * 10 ** 8)) self.assertRaises(RuntimeError, lambda: z[0][0][0].item()) # TODO: the out tests cannot be triggered by test_nn.py because # we don't actually do out= arguments for nn functions, so there # is no public API by which to get the out version # interpolate doesn't seem to support out= # (not sure why passing None here doesn't work? How strange...) z = torch.empty(0, device='meta') torch._C._nn.upsample_nearest1d(x, (4 * 10 ** 8,), 2, out=z) self.assertEqual(z.size(), (2 * 10 ** 8, 3, 4 * 10 ** 8)) self.assertRaises(RuntimeError, lambda: z[0][0][0].item()) @noarchTest def test_upsample_nearest2d_meta(self): # TODO: the out tests cannot be triggered by test_nn.py because # we don't actually do out= arguments for nn functions, so there # is no public API by which to get the out version # Make sure we don't clobber strides of out tensor. NB: this # test must be done on 2d/3d, because 1d doesn't have any meaningful # layout support x = torch.empty(4, 3, 8, 8, device='meta') out = torch.empty(4, 3, 16, 16, device='meta', memory_format=torch.channels_last) torch._C._nn.upsample_nearest2d(x, (16, 16), out=out) self.assertTrue(out.is_contiguous(memory_format=torch.channels_last)) x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last) out = torch.empty(4, 3, 16, 16, device='meta') torch._C._nn.upsample_nearest2d(x, (16, 16), out=out) self.assertTrue(out.is_contiguous()) # But if resize occurs, do clobber x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last) out = torch.empty(0, device='meta') torch._C._nn.upsample_nearest2d(x, (16, 16), out=out) self.assertTrue(out.is_contiguous(memory_format=torch.channels_last)) # Complain if out dtype mismatch x = torch.empty(4, 3, 8, 8, device='meta', dtype=torch.float) out = torch.empty(4, 3, 16, 16, device='meta', dtype=torch.double) self.assertExpectedRaisesInline( RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out), """Expected out tensor to have dtype float, but got double instead""" ) # Complain if out device mismatch x = torch.empty(0, 3, 8, 8, device='meta') out = torch.empty(0, 3, 16, 16, device='cpu') self.assertExpectedRaisesInline( RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out), """Expected out tensor to have device meta, but got cpu instead""" ) @noarchTest def test_detach_meta(self): x = torch.empty(2, device='meta') # This used to segfault self.assertRaises(RuntimeError, lambda: x.detach().storage()) @noarchTest def test_add_meta_scalar(self): # From https://github.com/pytorch/pytorch/issues/53815 x = torch.empty(2, device='meta') y = x + 2 self.assertEqual(y.size(), x.size()) def test_normal_shape(self): warned = False for device in torch.testing.get_all_device_types(): tensor1 = torch.rand(1, device=device) tensor4 = torch.rand(4, device=device) tensor120 = torch.rand(120, device=device) tensor2145 = torch.rand(2, 1, 4, 5, device=device) tensor2345 = torch.rand(2, 3, 4, 5, device=device) tensor2345_non_contiguous = torch.rand(2, 4, 3, 5, device=device).permute(0, 2, 1, 3) tensor2345_channels_last = tensor2345.contiguous(memory_format=torch.channels_last) output2345 = torch.zeros(2, 3, 4, 5, device=device) output345 = torch.zeros(3, 4, 5, device=device) # inputs have same size self.assertEqual(torch.normal(tensor2345, tensor2345).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(tensor2345_non_contiguous, 
tensor2345).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(tensor2345, tensor2345_channels_last).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(tensor2345_non_contiguous, tensor2345_channels_last).size(), (2, 3, 4, 5)) # scalar case self.assertEqual(torch.normal(tensor2345, 2).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(2, tensor2345).size(), (2, 3, 4, 5)) # inputs are expandable tensors self.assertEqual(torch.normal(tensor2345, tensor1).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(tensor2145, tensor2345).size(), (2, 3, 4, 5)) # inputs are non-expandable tensors, but they have same number of elements # TORCH_WARN_ONCE is used in torch.normal, only 1st assertEqual will show warn msg if not warned: self.assertWarnsRegex(UserWarning, "deprecated and the support will be removed", lambda: self.assertEqual(torch.normal(tensor120, tensor2345).size(), (120,))) warned = True else: self.assertEqual(torch.normal(tensor120, tensor2345).size(), (120,)) self.assertEqual(torch.normal(tensor2345, tensor120).size(), (2, 3, 4, 5)) # inputs are non-expandable tensors and they don't have same number of elements with self.assertRaisesRegex(RuntimeError, "inconsistent tensor"): torch.normal(tensor2345, tensor4) # output and inputs are size compatible self.assertEqual(torch.normal(tensor2345, tensor2345, out=output2345).size(), (2, 3, 4, 5)) # output and inputs are not size compatible with self.assertRaisesRegex(RuntimeError, "inconsistent tensor"): # inputs are expandable but have different broadcasted size than output torch.normal(tensor2345, tensor2145, out=output345) with self.assertRaisesRegex(RuntimeError, "inconsistent tensor"): # inputs are not expandable but reshapeable, output size is not the same as mean torch.normal(tensor2345, tensor120, out=output345) def test_tensoriterator_output_setup(self): # Test whether the output's memory layout is correct def test_memory_layout(x, y, scale, zero_point, out): self.assertEqual(x.dim(), 4) self.assertEqual(x.size(), y.size()) self.assertEqual(y.size(), out.size()) shape = x.size() for n in range(shape[0]): for c in range(shape[1]): for h in range(shape[2]): for w in range(shape[3]): if scale is not None and zero_point is not None: self.assertEqual( out[n][c][h][w], torch.ops.quantized.add(x[n][c][h][w], y[n][c][h][w], scale, zero_point)) else: self.assertEqual(out[n][c][h][w], x[n][c][h][w] + y[n][c][h][w]) xraw = torch.rand(2, 3, 4, 4) yraw = torch.rand(2, 3, 4, 4) qxraw = torch.quantize_per_tensor(xraw, 0.1, 5, torch.quint8) qyraw = torch.quantize_per_tensor(yraw, 0.1, 5, torch.quint8) # contiguous case fast setup test_memory_layout(xraw, yraw, None, None, xraw + yraw) test_memory_layout(qxraw, qyraw, 0.1, 5, torch.ops.quantized.add(qxraw, qyraw, 0.1, 5)) # channels last case fast setup x = xraw.contiguous(memory_format=torch.channels_last) y = yraw.contiguous(memory_format=torch.channels_last) test_memory_layout(x, y, None, None, x + y) qx = qxraw.contiguous(memory_format=torch.channels_last) qy = qyraw.contiguous(memory_format=torch.channels_last) test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5)) # non contiguous case fast setup (dense, non-overlapping, same shape and strides) x = xraw.permute(0, 2, 3, 1) y = yraw.permute(0, 2, 3, 1) test_memory_layout(x, y, None, None, x + y) qx = qxraw.permute(0, 2, 3, 1) qy = qyraw.permute(0, 2, 3, 1) test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5)) # non contiguous case fast setup (dense, non-overlapping) # input tensors have same shape and 
strides # output tensor have same shape as input tensors but different stride # output tensor should preserve its strides in this case x = xraw.permute(0, 2, 3, 1) y = yraw.permute(0, 2, 3, 1) out = torch.empty_like(xraw) out = out.permute(0, 3, 2, 1) expected_stride = out.stride() test_memory_layout(x, y, None, None, torch.add(x, y, out=out)) self.assertEqual(expected_stride, out.stride()) # non contiguous case non fast setup x = xraw.permute(0, 2, 3, 1) y = yraw.permute(0, 3, 2, 1) test_memory_layout(x, y, None, None, x + y) qx = qxraw.permute(0, 2, 3, 1) qy = qyraw.permute(0, 3, 2, 1) test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5)) # Tests to make sure we still handle .data properly until it is removed def test_dot_data_use(self): # .data allows to change the Tensors types inplace, check that we still # raise a nice error. with self.assertRaisesRegex( RuntimeError, # message includes both Double and Long '(?=.*Double)(?=.*Long)'): # Calls model with a LongTensor input but DoubleTensor weights input = torch.randn(1, 1, 1, 6, dtype=torch.double) weight = torch.zeros(1, 1, 1, 3, dtype=torch.long) model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False) model.weight.data = weight out = model(input) # Functions to test negative dimension wrapping METHOD = 1 INPLACE_METHOD = 2 FUNCTIONAL = 4 DIM_ARG = None def make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim=0): def neg_dim_test(self): if isinstance(tensor_arg, list): assert METHOD not in types and INPLACE_METHOD not in types x = [torch.randn(arg) for arg in tensor_arg] ndim = len(tensor_arg[-1]) else: x = torch.randn(*tensor_arg) ndim = len(tensor_arg) ndim += extra_dim n_dim_to_test = sum(e is DIM_ARG for e in arg_constr()) for dims_val in combinations(range(ndim), n_dim_to_test): arg = arg_constr() arg_neg = copy.deepcopy(arg) idx = 0 for i, v in enumerate(arg): if v is DIM_ARG: arg[i] = dims_val[idx] arg_neg[i] = dims_val[idx] - ndim idx += 1 if METHOD in types: a = getattr(x, name)(*arg) b = getattr(x, name)(*arg_neg) self.assertEqual(a, b) if INPLACE_METHOD in types: a = x.clone() getattr(a, name + '_')(*arg) b = x.clone() getattr(b, name + '_')(*arg_neg) self.assertEqual(a, b) if FUNCTIONAL in types: a = getattr(torch, name)(x, *arg) b = getattr(torch, name)(x, *arg_neg) self.assertEqual(a, b) return neg_dim_test def idx_tensor(size, max_val): return torch.LongTensor(*size).random_(0, max_val - 1) def add_neg_dim_tests(): neg_dim_tests = [ ('narrow', (10, 20, 30), lambda: [DIM_ARG, 0, 5], [METHOD]), ('transpose', (10, 20, 30), lambda: [DIM_ARG, DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]), ('size', (10, 20, 30), lambda: [DIM_ARG], [METHOD]), ('cat', [(2, 3, 4), (2, 3, 4)], lambda: [DIM_ARG], [FUNCTIONAL]), ('chunk', (10, 20, 30), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]), ('gather', (10, 20), lambda: [DIM_ARG, idx_tensor((10, 20), 10)], [METHOD, FUNCTIONAL]), ('index_select', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10)], [METHOD, FUNCTIONAL]), ('split', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]), ('squeeze', (10, 1, 20, 1), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]), ('unbind', (2, 3, 4), lambda: [DIM_ARG], [FUNCTIONAL]), ('unsqueeze', (10, 20), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL], 1), ('logcumsumexp', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('cumprod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('cumsum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('cummax', (10, 20), lambda: [DIM_ARG], 
def add_neg_dim_tests():
    neg_dim_tests = [
        ('narrow', (10, 20, 30), lambda: [DIM_ARG, 0, 5], [METHOD]),
        ('transpose', (10, 20, 30), lambda: [DIM_ARG, DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
        ('size', (10, 20, 30), lambda: [DIM_ARG], [METHOD]),
        ('cat', [(2, 3, 4), (2, 3, 4)], lambda: [DIM_ARG], [FUNCTIONAL]),
        ('chunk', (10, 20, 30), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('gather', (10, 20), lambda: [DIM_ARG, idx_tensor((10, 20), 10)], [METHOD, FUNCTIONAL]),
        ('index_select', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10)], [METHOD, FUNCTIONAL]),
        ('split', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('squeeze', (10, 1, 20, 1), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
        ('unbind', (2, 3, 4), lambda: [DIM_ARG], [FUNCTIONAL]),
        ('unsqueeze', (10, 20), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL], 1),
        ('logcumsumexp', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cumprod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cumsum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cummax', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('cummin', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('mean', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('median', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('nanmedian', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('mode', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('norm', (10, 20), lambda: [2, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('prod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('std', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('sum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('var', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('kthvalue', (10, 20), lambda: [3, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('max', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('min', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('sort', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]),
        ('topk', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]),
        ('renorm', (10, 20), lambda: [2, DIM_ARG, 1], [METHOD, INPLACE_METHOD, FUNCTIONAL]),
        ('index_add', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
        ('index_copy', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
        ('index_fill', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), 12], [INPLACE_METHOD]),
        ('scatter', (10, 10), lambda: [DIM_ARG, idx_tensor((10, 10), 10), torch.randn(10, 10)], [INPLACE_METHOD]),
        ('select', (10, 20), lambda: [DIM_ARG, 3], [METHOD]),
        ('unfold', (10, 20), lambda: [DIM_ARG, 5, 2], [METHOD]),
    ]

    for decl in neg_dim_tests:
        if len(decl) == 4:
            name, tensor_arg, arg_constr, types = decl
            extra_dim = 0
        elif len(decl) == 5:
            name, tensor_arg, arg_constr, types, extra_dim = decl

        test_name = 'test_' + name + '_neg_dim'

        assert not hasattr(AbstractTestCases._TestTorchMixin, test_name), "Duplicated test name: " + test_name
        setattr(AbstractTestCases._TestTorchMixin, test_name,
                make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim))

@contextlib.contextmanager
def torch_vital_set(value):
    stash = None
    if 'TORCH_VITAL' in os.environ:
        stash = os.environ['TORCH_VITAL']
    os.environ['TORCH_VITAL'] = value
    try:
        yield
    finally:
        if stash:
            os.environ['TORCH_VITAL'] = stash
        else:
            del os.environ['TORCH_VITAL']

# Tests Vital Signs for Torch
class TestBasicVitalSigns(TestCase):
    def test_basic_vitals(self):
        with torch_vital_set(''):
            self.assertFalse(torch.vitals_enabled())
        with torch_vital_set('ON'):
            self.assertTrue(torch.vitals_enabled())

    def test_basic_vitals_read_write(self):
        with torch_vital_set('ON'):
            self.assertTrue(torch.vitals_enabled())
            # This tests the code path of setting a vital
            self.assertTrue(torch.set_vital('Dataloader', 'basic_unit_test', 'TEST_VALUE_STRING'))
            self.assertIn('TEST_VALUE_STRING', torch.read_vitals())
            self.assertIn('CUDA.used', torch.read_vitals())

class TestVitalSignsCuda(TestCase):
    @onlyCUDA
    def test_cuda_vitals_gpu_only(self, device):
        with torch_vital_set('ON'):
            self.assertIn('CUDA.used\t\t true', torch.read_vitals())

# Device-generic tests. Instantiated below and not run directly.
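# (These device-generic tests are typically instantiated once per device type,
# e.g. via instantiate_device_type_tests at the bottom of the file -- not shown here.)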
class TestTorchDeviceType(TestCase):
    exact_dtype = True

    # TODO: move all tensor creation to common ops
    def _rand_shape(self, dim, min_size, max_size):
        shape = []
        for i in range(dim):
            shape.append(random.randint(min_size, max_size))
        return tuple(shape)

    @onlyCPU
    def test_set_deterministic_deprecated_warning(self, device):
        with DeterministicGuard(torch.are_deterministic_algorithms_enabled()):
            # Calling set_deterministic throws a warning about deprecation once
            # per process but testing this is tricky here since we actually get
            # two warnings: one for the deprecated use of `set_deterministic`
            # and one for the 'beta' use of `use_deterministic_algorithms`.
            # The assertWarnsOnceRegex cannot handle two different warnings
            with warnings.catch_warnings(record=True) as ws:
                warnings.simplefilter("always")  # allow any warning to be raised
                prev = torch.is_warn_always_enabled()
                torch.set_warn_always(True)
                try:
                    torch.set_deterministic(True)
                finally:
                    torch.set_warn_always(prev)
                for w in ws:
                    txt = str(w.message)
                    assert ("torch.use_deterministic_algorithms is in beta" in txt or
                            "torch.set_deterministic is deprecated" in txt)

    @onlyCPU
    def test_is_deterministic_deprecated_warning(self, device):
        with DeterministicGuard(torch.are_deterministic_algorithms_enabled()):
            # Calling is_deterministic throws a warning about deprecation once per process
            with self.assertWarnsOnceRegex(UserWarning, "torch.is_deterministic is deprecated"):
                torch.is_deterministic()

    # Validates that mathematical constants are defined properly, as required by
    # the Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html)
    @onlyCPU
    def test_constants(self, device):
        self.assertIsInstance(torch.e, float)
        self.assertEqual(torch.e, math.e, atol=0, rtol=0)

        self.assertIsInstance(torch.pi, float)
        self.assertEqual(torch.pi, math.pi, atol=0, rtol=0)

        self.assertIsInstance(torch.nan, float)
        self.assertEqual(torch.nan, math.nan, equal_nan=True)

        self.assertIsInstance(torch.inf, float)
        self.assertEqual(torch.inf, math.inf)

    @dtypes(torch.float32, torch.complex64)
    def test_storage(self, device, dtype):
        v = torch.randn(3, 5, dtype=dtype, device=device)
        self.assertEqual(v.storage()[0], v[0][0])
        self.assertEqual(v.storage()[14], v[2][4])

    @dtypes(torch.float32, torch.complex64)
    def test_deepcopy(self, device, dtype):
        from copy import deepcopy
        a = torch.randn(5, 5, dtype=dtype, device=device)
        b = torch.randn(5, 5, dtype=dtype, device=device)
        c = a.view(25)
        q = [a, [a.storage(), b.storage()], b, c]
        w = deepcopy(q)
        self.assertEqual(w[0], q[0], atol=0, rtol=0)
        self.assertEqual(w[1][0], q[1][0], atol=0, rtol=0)
        self.assertEqual(w[1][1], q[1][1], atol=0, rtol=0)
        self.assertEqual(w[1], q[1], atol=0, rtol=0)
        self.assertEqual(w[2], q[2], atol=0, rtol=0)

        # Check that deepcopy preserves sharing
        w[0].add_(1)
        for i in range(a.numel()):
            self.assertEqual(w[1][0][i], q[1][0][i] + 1)
        self.assertEqual(w[3], c + 1)
        w[2].sub_(1)
        for i in range(a.numel()):
            self.assertEqual(w[1][1][i], q[1][1][i] - 1)

    @dtypes(torch.float32, torch.complex64)
    def test_deepcopy_scalar(self, device, dtype):
        from copy import deepcopy
        a = torch.tensor(5, dtype=dtype, device=device)
        self.assertEqual(a.size(), deepcopy(a).size())
        self.assertEqual(a, deepcopy(a))
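    # Note on the overlap helpers below: expanding a 1-element tensor to (3, 3)
    # gives it zero strides, so every element aliases the same memory location;
    # in-place ops on such inputs are expected to raise.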
    def check_internal_mem_overlap(self, inplace_op, num_inputs,
                                   dtype, device,
                                   expected_failure=False):
        if isinstance(inplace_op, str):
            inplace_op = getattr(torch.Tensor, inplace_op)
        input = torch.randn(1, dtype=dtype, device=device).expand(3, 3)
        inputs = [input] + [torch.randn_like(input)
                            for i in range(num_inputs - 1)]
        if not expected_failure:
            with self.assertRaisesRegex(RuntimeError, 'single memory location'):
                inplace_op(*inputs)
        else:
            with self.assertRaises(AssertionError):
                with self.assertRaisesRegex(RuntimeError, 'single memory location'):
                    inplace_op(*inputs)

    def unary_check_input_output_mem_overlap(self, data, sz, op,
                                             expected_failure=False):
        def _test(op, output, input):
            output_exp = torch.empty_like(output)
            op(input, out=output_exp)
            self.assertEqual(op(input, out=output), output_exp, msg=op.__name__)

        # output is identical to input:
        _test(op, output=data[0:sz], input=data[0:sz])
        # output and input are independent:
        _test(op, output=data[0:sz], input=data[sz:2 * sz])
        # output partially overlaps with input:
        if not expected_failure:
            with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                _test(op, data[0:sz], data[1:sz + 1])
        else:
            with self.assertRaises(AssertionError):
                with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                    _test(op, data[0:sz], data[1:sz + 1])
        # output is transpose of input:
        length = int(math.sqrt(sz))
        input = data[:length**2].view([length, length])
        out = input.t()
        if not expected_failure:
            with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                _test(op, out, input)
        else:
            with self.assertRaises(AssertionError):
                with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                    _test(op, out, input)

    def ternary_check_input_output_mem_overlap(self, op, device,
                                               expected_failure=False):
        sz = 9
        data = torch.randn(2 * sz, device=device)
        other1 = torch.randn(sz, device=device)
        other2 = torch.randn(sz, device=device)

        self.unary_check_input_output_mem_overlap(
            data, sz, lambda input, out:
                op(input, other1.view(input.shape), other2.view(input.shape), out=out),
            expected_failure=expected_failure)

        self.unary_check_input_output_mem_overlap(
            data, sz, lambda input, out:
                op(other1.view(input.shape), input, other2.view(input.shape), out=out),
            expected_failure=expected_failure)

        self.unary_check_input_output_mem_overlap(
            data, sz, lambda input, out:
                op(other1.view(input.shape), other2.view(input.shape), input, out=out),
            expected_failure=expected_failure)

    def _select_broadcastable_dims(self, dims_full=None):
        # select full dimensionality
        if dims_full is None:
            dims_full = []
            ndims = random.randint(1, 4)
            dims_full = [random.randint(1, 8) for _ in range(ndims)]
        else:
            ndims = len(dims_full)

        # select actual dimensions for ops:
        # larger: full ndims, individual sizes may be reduced
        # smaller: possibly reduced ndims, sizes may be reduced
        smaller_ndims = random.randint(1, ndims)
        dims_small = []
        dims_large = []
        for i in range(ndims - 1, -1, -1):
            j = random.randint(1, 3)
            if j == 1:  # no reduced singleton dimension
                ds = dims_full[i]
                dl = dims_full[i]
            elif j == 2:  # larger may have reduced singleton dimension
                ds = dims_full[i]
                dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]
            elif j == 3:  # smaller may have reduced singleton dimension
                ds = 1
                dl = dims_full[i]
            dims_large = [dl] + dims_large
            if len(dims_small) < smaller_ndims:
                dims_small = [ds] + dims_small
        return (dims_small, dims_large, dims_full)

    # collected tests of ops that used scalar_check in Declarations.cwrap for
    # correctness
    def test_scalar_check(self, device):
        zero_d = torch.randn((), device=device)
        one_d = torch.randn((1,), device=device)

        # remainder
        self.assertEqual((), torch.remainder(zero_d, zero_d).shape)
        self.assertEqual((), torch.remainder(zero_d, 2).shape)
        self.assertEqual((1,), torch.remainder(zero_d, one_d).shape)
        self.assertEqual((1,), torch.remainder(one_d, zero_d).shape)
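        # The pattern throughout this test: 0-d op 0-d stays 0-d, while mixing
        # a 0-d with a 1-d operand broadcasts to shape (1,).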
        # fmod
        self.assertEqual((), torch.fmod(zero_d, zero_d).shape)
        self.assertEqual((), torch.fmod(zero_d, 2).shape)
        self.assertEqual((1,), torch.fmod(zero_d, one_d).shape)
        self.assertEqual((1,), torch.fmod(one_d, zero_d).shape)

        # exp, cos, cosh, tan, atan, tanh, erf, erfc, reciprocal
        self.assertEqual((), torch.exp(zero_d).shape)
        self.assertEqual((), torch.cos(zero_d).shape)
        self.assertEqual((), torch.cosh(zero_d).shape)
        self.assertEqual((), torch.tan(zero_d).shape)
        self.assertEqual((), torch.atan(zero_d).shape)
        self.assertEqual((), torch.acosh(zero_d).shape)
        self.assertEqual((), torch.asinh(zero_d).shape)
        self.assertEqual((), torch.atanh(zero_d).shape)
        self.assertEqual((), torch.tanh(zero_d).shape)
        self.assertEqual((), torch.erf(zero_d).shape)
        self.assertEqual((), torch.erfc(zero_d).shape)
        self.assertEqual((), torch.reciprocal(zero_d).shape)
        self.assertEqual((1,), torch.exp(one_d).shape)
        self.assertEqual((1,), torch.cos(one_d).shape)
        self.assertEqual((1,), torch.cosh(one_d).shape)
        self.assertEqual((1,), torch.tan(one_d).shape)
        self.assertEqual((1,), torch.atan(one_d).shape)
        self.assertEqual((1,), torch.acosh(one_d).shape)
        self.assertEqual((1,), torch.asinh(one_d).shape)
        self.assertEqual((1,), torch.atanh(one_d).shape)
        self.assertEqual((1,), torch.tanh(one_d).shape)
        self.assertEqual((1,), torch.erf(one_d).shape)
        self.assertEqual((1,), torch.erfc(one_d).shape)
        self.assertEqual((1,), torch.reciprocal(one_d).shape)

        # clamp
        self.assertEqual((), torch.clamp(zero_d, min=0, max=1).shape)
        self.assertEqual((), torch.clamp(zero_d, min=0).shape)
        self.assertEqual((), torch.clamp(zero_d, max=1).shape)
        self.assertEqual((1,), torch.clamp(one_d, min=0, max=1).shape)
        self.assertEqual((1,), torch.clamp(one_d, min=0).shape)
        self.assertEqual((1,), torch.clamp(one_d, max=1).shape)

        # cumsum, cumprod, cummax, cummin
        self.assertEqual((), torch.logcumsumexp(zero_d, 0).shape)
        self.assertEqual((), torch.cumsum(zero_d, 0).shape)
        self.assertEqual((), torch.cumprod(zero_d, 0).shape)
        self.assertEqual((), torch.cummax(zero_d, 0)[0].shape)
        self.assertEqual((), torch.cummin(zero_d, 0)[0].shape)

        # renorm
        self.assertRaises(RuntimeError, lambda: torch.renorm(zero_d, 0.5, 0, 1.0))

        # sort, topk
        self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, False)])
        self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, True)])
        self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, False)])
        self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, True)])

        # lstsq (gels)
        self.assertRaises(RuntimeError, lambda: torch.lstsq(zero_d, zero_d))

        # eig
        self.assertRaises(RuntimeError, lambda: torch.eig(zero_d, False))
        self.assertRaises(RuntimeError, lambda: torch.eig(zero_d, True))

        # this is only implemented on cpu
        if (torch.device(device).type == 'cpu'):
            self.assertRaises(RuntimeError, lambda: torch.ormqr(zero_d, zero_d, zero_d))

        # max, min
        self.assertEqual((), torch.max(zero_d, zero_d).shape)
        self.assertEqual((1,), torch.max(one_d, zero_d).shape)
        self.assertEqual((1,), torch.max(zero_d, one_d).shape)
        self.assertEqual((), torch.min(zero_d, zero_d).shape)
        self.assertEqual((1,), torch.min(one_d, zero_d).shape)
        self.assertEqual((1,), torch.min(zero_d, one_d).shape)

        # diag
        self.assertRaises(RuntimeError, lambda: torch.diag(zero_d))

        zero_d_int = torch.tensor(1, device=device)
        one_d_int = torch.tensor([1], device=device)

        # lshift, rshift
        self.assertEqual((), (zero_d_int >> zero_d_int).shape)
        self.assertEqual((), (zero_d_int >> 1).shape)
        self.assertEqual((1,), (one_d_int >> zero_d_int).shape)
        self.assertEqual((1,), (zero_d_int >> one_d_int).shape)
        self.assertEqual((1,), (one_d_int >> 1).shape)

        self.assertEqual((), (zero_d_int << zero_d_int).shape)
        self.assertEqual((), (zero_d_int << 1).shape)
        self.assertEqual((1,), (one_d_int << zero_d_int).shape)
        self.assertEqual((1,), (zero_d_int << one_d_int).shape)
        self.assertEqual((1,), (one_d_int << 1).shape)

        # or
        self.assertEqual((), (zero_d_int | zero_d_int).shape)
        self.assertEqual((), (zero_d_int | 1).shape)
        self.assertEqual((1,), (one_d_int | zero_d_int).shape)
        self.assertEqual((1,), (zero_d_int | one_d_int).shape)
        self.assertEqual((1,), (one_d_int | 1).shape)

        # and
        self.assertEqual((), (zero_d_int & zero_d_int).shape)
        self.assertEqual((), (zero_d_int & 1).shape)
        self.assertEqual((1,), (one_d_int & zero_d_int).shape)
        self.assertEqual((1,), (zero_d_int & one_d_int).shape)
        self.assertEqual((1,), (one_d_int & 1).shape)

        # clone
        self.assertEqual((), zero_d.clone().shape)

        zero_d_bool = torch.tensor(True, device=device)
        one_d_bool = torch.tensor([True], device=device)

        # masked_select
        self.assertEqual((1,), torch.masked_select(zero_d_bool, zero_d_bool).shape)
        self.assertEqual((1,), torch.masked_select(zero_d_bool, one_d_bool).shape)
        self.assertEqual((1,), torch.masked_select(one_d_bool, zero_d_bool).shape)

        zero_d_uint8 = torch.tensor(1, dtype=torch.uint8, device=device)
        one_d_uint8 = torch.tensor([1], dtype=torch.uint8, device=device)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.assertEqual((1,), torch.masked_select(zero_d_uint8, zero_d_uint8).shape)
            self.assertEqual((1,), torch.masked_select(zero_d_uint8, one_d_uint8).shape)
            self.assertEqual((1,), torch.masked_select(one_d_uint8, zero_d_uint8).shape)

        # mode
        self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=False)])
        self.assertEqual([(1,), (1,)], [x.shape for x in torch.mode(one_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.mode(one_d, dim=0, keepdim=False)])

        # max
        self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=False)])
        self.assertEqual([(1,), (1,)], [x.shape for x in torch.max(one_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.max(one_d, dim=0, keepdim=False)])

        # amax
        self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=True).shape)
        self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=False).shape)
        self.assertEqual((1,), torch.amax(one_d, dim=0, keepdim=True).shape)
        self.assertEqual((), torch.amax(one_d, dim=0, keepdim=False).shape)

        # min
        self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=False)])
        self.assertEqual([(1,), (1,)], [x.shape for x in torch.min(one_d, dim=0, keepdim=True)])
        self.assertEqual([(), ()], [x.shape for x in torch.min(one_d, dim=0, keepdim=False)])

        # amin
        self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=True).shape)
        self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=False).shape)
        self.assertEqual((1,), torch.amin(one_d, dim=0, keepdim=True).shape)
        self.assertEqual((), torch.amin(one_d, dim=0, keepdim=False).shape)

        # set_
        zero_d_clone = zero_d.clone()
        one_d_clone = one_d.clone()
        self.assertEqual((), zero_d_clone.set_(one_d.storage(), 0, (), ()).shape)
        self.assertEqual((1,), zero_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape)
        self.assertEqual((), one_d_clone.set_(one_d.storage(), 0, (), ()).shape)
        self.assertEqual((1,), one_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape)

        self.assertEqual((), zero_d.clone().set_(zero_d).shape)
        self.assertEqual((), one_d.clone().set_(zero_d).shape)
        self.assertEqual((1,), zero_d.clone().set_(one_d).shape)
        self.assertEqual((1,), one_d.clone().set_(one_d).shape)

        # take
        self.assertEqual((), torch.randn((2, 3), device=device).take(zero_d_int).shape)
        self.assertEqual((1,), torch.randn((2, 3), device=device).take(one_d_int).shape)

        # gather
        self.assertEqual((), torch.gather(zero_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape)
        self.assertEqual((1,), torch.gather(zero_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape)
        self.assertEqual((), torch.gather(one_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape)
        self.assertEqual((1,), torch.gather(one_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape)

        # normal
        # std must be >= 0
        zero_d_ge_0 = torch.rand((), device=device)
        # documentation says out shape matches shape of mean
        self.assertEqual((), torch.normal(zero_d, zero_d_ge_0).shape)
        self.assertEqual((1,), torch.normal(one_d, zero_d_ge_0).shape)
        self.assertEqual((), torch.normal(1, zero_d_ge_0).shape)
        self.assertEqual((), torch.normal(zero_d, 1).shape)
        self.assertEqual((1,), torch.normal(one_d, 1).shape)
        # TODO: this behavior differs on CPU and GPU, see https://github.com/pytorch/pytorch/issues/30480.
        # self.assertEqual((), torch.normal(zero_d, one_d).shape)
        # self.assertEqual((), torch.normal(1, one_d).shape)

        # convolutions.  Yes, we are testing nn.functional here; it seems justified
        # given it's similar to the other tests
        w = torch.randn(2, 1, 3, 3, device=device).div_(2).requires_grad_()
        self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=1))
        self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=2))
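        # conv2d expects a batched (N, C, H, W) input here, so passing a 0-d
        # tensor must raise regardless of the groups setting.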
        # nll_loss -- verify input can't be 0-dimensional.
        self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, zero_d, reduction='none'))
        self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, one_d, reduction='none'))
        # verify output is 0-dimensional when reduction != 'none'
        for (input, target) in ((torch.randn(1, 1, device=device), torch.tensor([0], device=device)),
                                (torch.randn(1, 1, 1, 1, device=device), torch.tensor([[[0]]], device=device))):
            self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='mean').shape)
            self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='sum').shape)

        # multilabel_margin_loss
        for input in (zero_d, one_d, torch.randn(1, 1, device=device)):
            for target in (torch.tensor(0, device=device), torch.tensor([0], device=device), torch.tensor([[0]], device=device)):
                if (input.dim() <= 1 and target.dim() <= 1) or (input.dim() == 2 and target.dim() == 2):
                    output_shape = (target.shape[0],) if target.dim() == 2 else ()
                    self.assertEqual(output_shape,
                                     torch.nn.functional.multilabel_margin_loss(input, target, reduction='none').shape)
                    self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean').shape)
                    self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum').shape)
                else:
                    self.assertRaises(RuntimeError,
                                      lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='none'))
                    self.assertRaises(RuntimeError,
                                      lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean'))
                    self.assertRaises(RuntimeError,
                                      lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum'))

        # multi_margin_loss
        for input in (zero_d, one_d, torch.randn(1, 1, device=device)):
            for target in (torch.tensor(0, device=device), torch.tensor([0], device=device)):
                self.assertEqual(target.shape, torch.nn.functional.multi_margin_loss(input, target, reduction='none').shape)
                self.assertEqual((), torch.nn.functional.multi_margin_loss(input, target, reduction='mean').shape)
                self.assertEqual((), torch.nn.functional.multi_margin_loss(input, target, reduction='sum').shape)
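    # Note for the next test: arange(0, 3) produces 3 elements but `out` has 5,
    # so the write triggers a warning raised from the C++ side (RangeFactories),
    # which is what the regex below matches against.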
    # Uses mismatched arange out size to trigger a warning
    def test_cpp_warnings_have_python_context(self, device):
        # Creates long string in advance to avoid a too-long Python line
        s = ".+Triggered internally at.+RangeFactories.+"

        def cpp_warn_fn():
            out = torch.empty((5,))
            torch.arange(0, 3, out=out)
            return out

        # Checks eager-mode cpp warning
        with warnings.catch_warnings(record=True) as w:
            cpp_warn_fn()
            frameinfo = inspect.getframeinfo(inspect.currentframe())
            warning = w[0]

            # Checks for cpp context in the warning message
            self.assertTrue(re.search(s, str(warning.message)) is not None)

            # Checks the Python features of the warning
            # Note: the eager mode warning refers to the line in the function
            #   that throws the warning.
            self.assertEqual(frameinfo.lineno - 6, warning.lineno)
            self.assertEqual(len(w), 1)

        # Checks jitted cpp warning
        with warnings.catch_warnings(record=True) as w:
            scripted_cpp_warn_fn = torch.jit.script(cpp_warn_fn)
            scripted_cpp_warn_fn()
            warning = w[0]

            # Checks for cpp context in the warning message
            self.assertTrue(re.search(s, str(warning.message)) is not None)

            # Checks the Python features of the warning
            # Note: the jitted warning's lineno refers to the call to the jitted
            #   function, which in our test suite has a layer of indirection
            #   that makes checking the Python lineno fragile
            self.assertEqual(len(w), 1)

        # Checks jitted Python warning
        def warn_fn():
            warnings.warn("Warning!")

        # The jit mimics an eager-mode Python warning in this case
        with warnings.catch_warnings(record=True) as w:
            scripted_warn_fn = torch.jit.script(warn_fn)
            scripted_warn_fn()
            frameinfo = inspect.getframeinfo(inspect.currentframe())
            warning = w[0]

            self.assertTrue(re.search('Warning!', str(warning.message)) is not None)

            # Checks the Python features of the warning
            self.assertEqual(frameinfo.lineno - 6, warning.lineno)
            self.assertEqual(len(w), 1)

    @onlyCPU
    def test_warn_always_caught(self, device):
        # Check that we can catch a TORCH_WARN_ONCE warning twice
        # since assertWarnsOnceRegex uses set_warn_always(True) which changes
        # TORCH_WARN_ONCE to TORCH_WARN
        a = np.arange(10)
        a.flags.writeable = False
        with self.assertWarnsOnceRegex(UserWarning, '.*non-writeable.*'):
            torch.from_numpy(a)

        # OK, got it once, now try again
        with self.assertWarnsOnceRegex(UserWarning, '.*non-writeable.*'):
            torch.from_numpy(a)

        # Make sure emitting two warnings will pass the assertWarnsOnceRegex
        # context manager
        with self.assertWarnsOnceRegex(UserWarning, '.*non-writeable.*'):
            torch.from_numpy(a)
            torch.from_numpy(a)

    # TODO: this test should be in test_nn.py
    def test_conv_transposed_backward_agnostic_to_memory_format(self, device):
        in_channels = 64
        out_channels = 128
        scale_factor = 8
        batch_size = 8
        length = 16

        conv = torch.nn.ConvTranspose1d(
            in_channels, out_channels, kernel_size=scale_factor * 2, stride=scale_factor).to(device)
        layer_norm = torch.nn.LayerNorm(out_channels).to(device)

        input_ = torch.randn(batch_size, in_channels, length).to(device).contiguous()
        input_ = conv(input_).contiguous()
        input_ = layer_norm(input_.transpose(1, 2).contiguous()).contiguous()
        input_.sum().backward()

    # TODO: this test should be in test_nn.py
    @onlyCUDA
    @largeTensorTest('12GB')
    def test_conv_transposed_large(self, device):
        # ConvTranspose3d works for large input tensors (gh-32866)
        in_channels = 64
        out_channels = 128
        kernel_size = 5

        conv = torch.nn.ConvTranspose3d(
            in_channels, out_channels, kernel_size=kernel_size,
            stride=2, padding=2, output_padding=1).to(device)

        x = torch.rand([1, 64, 8, 128, 172]).to(device)
        y = conv(x)

    def test_is_set_to(self, device):
        t1 = torch.empty(3, 4, 9, 10, device=device)
        t2 = torch.empty(3, 4, 9, 10, device=device)
        t3 = torch.tensor([], device=device).set_(t1)
        t4 = t3.clone().resize_(12, 90)
        self.assertFalse(t1.is_set_to(t2))
        self.assertTrue(t1.is_set_to(t3))
        self.assertTrue(t3.is_set_to(t1), "is_set_to should be symmetric")
        self.assertFalse(t1.is_set_to(t4))
        self.assertFalse(torch.tensor([]).is_set_to(torch.tensor([])),
                         "Tensors with no storages should not appear to be set "
                         "to each other")

        t1 = torch.tensor([True, True], dtype=torch.bool, device=device)
        t2 = torch.tensor([0], dtype=torch.bool, device=device).set_(t1)
        self.assertTrue(t1.is_set_to(t2))

        # test that sizes must match
        t1 = torch.empty([2, 3, 4], device=device)
        t2 = t1.view(4, 3, 2)
        self.assertFalse(t1.is_set_to(t2))
        self.assertFalse(t2.is_set_to(t1))

        # test that legacy empty size behavior used to be respected (i.e. all
        # empty tensors were logically collapsed to size [0]).
        t1 = torch.empty([2, 5, 0], device=device)
        t2 = t1.view([0])
        self.assertFalse(t1.is_set_to(t2))
        self.assertFalse(t2.is_set_to(t1))

    def test_broadcast(self, device):
        # all functions
        fns = {
            "dist", "atan2", "pow", "lerp", "add", "sub", "mul", "div", "fmod",
            "remainder", "eq", "ge", "gt", "le", "lt", "max", "min", "ne",
            "addcdiv", "addcmul", "masked_scatter", "masked_select", "masked_fill",
            "map", "map2", "copy"
        }
        # functions with three tensor arguments
        fns_3_args = {"map2"}
        fns_value_kwarg = {"addcdiv", "addcmul"}

        for fn in fns:
            (dims_small, dims_large, dims_full) = self._select_broadcastable_dims()
            full1d = torch.randn(*dims_full, device=device).flatten().float()
            small = torch.randn(*dims_small, device=device).float()
            large = torch.randn(*dims_large, device=device).float()
            small_expanded = small.expand(*dims_full)
            large_expanded = large.expand(*dims_full)
            small2 = None
            small2_expanded = None
            if fn in fns_3_args or fn in fns_value_kwarg:
                # create another smaller tensor
                (dims_small2, _, _) = self._select_broadcastable_dims(dims_full)
                small2 = torch.randn(*dims_small2, device=device).float()
                small2_expanded = small2.expand(*dims_full)
            if small.is_cuda and fn in ['map', 'map2']:
                # map and map2 are not implemented on CUDA tensors
                continue

            if hasattr(large_expanded, fn):
                # run through tensor versions of functions
                # and verify fully expanded inputs give same results
                expanded = {large: large_expanded, small: small_expanded, small2: small2_expanded}

                def tensorfn(myfn, t1, t2):
                    if fn == "lerp":
                        return myfn(t1, 0.5)
                    elif fn == "masked_select":
                        return myfn(t1 < 0)
                    elif fn == "masked_scatter":
                        return myfn(t1 < 0.5, full1d)
                    elif fn == "masked_fill":
                        return myfn(t1 < 0.5, 1.0)
                    elif fn in fns_3_args:
                        return myfn(1, t1, t2)
                    elif fn in fns_value_kwarg:
                        return myfn(t1, t2, value=1)
                    else:
                        return myfn(t1)

                # test various orders
                for first, second, third in [(large, small, small2), (small, large, small2),
                                             (small2, small, large), (small2, large, small)]:
                    if first is None:
                        break  # ignore last iter when small2 is None
                    method_expanded = getattr(expanded[first], fn)
                    method = getattr(first, fn)
                    r1 = tensorfn(method_expanded, expanded[second], expanded[third])
                    r2 = tensorfn(method, second, third)
                    self.assertEqual(r1, r2)
            # now for torch. versions of functions
            if hasattr(torch, fn):
                fntorch = getattr(torch, fn)
                expanded = {large: large_expanded, small: small_expanded, small2: small2_expanded}

                def torchfn(t1, t2, t3):
                    if fn == "lerp":
                        return fntorch(t1, t2, 0.5)
                    elif fn == "masked_select":
                        return fntorch(t1, t2 < 0)
                    elif fn == "masked_scatter":
                        return fntorch(t1, t2 < 0.5, full1d)
                    elif fn == "masked_fill":
                        return fntorch(t1, t2 < 0.5, 1.0)
                    elif fn in fns_3_args:
                        return fntorch(t1, 1.0, t2, t3)
                    elif fn in fns_value_kwarg:
                        return fntorch(t1, t2, t3, value=1.0)
                    else:
                        return fntorch(t1, t2)

                # test various orders
                for first, second, third in [(large, small, small2), (small, large, small2),
                                             (small2, small, large), (small2, large, small)]:
                    if first is None:
                        break  # ignore last iter when small2 is None
                    r1 = torchfn(expanded[first], expanded[second], expanded[third])
                    r2 = torchfn(first, second, third)
                    self.assertEqual(r1, r2)

            # now for in place functions
            # in-place tensor is not broadcastable; test only guaranteed
            # to work by broadcasting other argument(s)
            if not hasattr(large_expanded, fn + "_"):
                continue

            # need to clone largeExpanded so we can reuse, since functions are in-place
            large_expanded_clone = large_expanded.clone()

            def tensorfn_inplace(t0, t1, t2=None):
                t0_fn = getattr(t0, fn + "_")
                if fn == "lerp":
                    return t0_fn(t1, 0.5)
                elif fn == "masked_scatter":
                    return t0_fn(t1 < 0.5, full1d)
                elif fn == "masked_fill":
                    return t0_fn(t1 < 0.5, 1.0)
                elif fn == "map":
                    return t0_fn(t1, lambda x, y: x + y)
                elif fn == "map2":
                    return t0_fn(t1, t2, lambda x, y, z: x + y + z)
                elif fn in fns_3_args:
                    return t0_fn(1.0, t1, t2)
                elif fn in fns_value_kwarg:
                    return t0_fn(t1, t2, value=1.0)
                else:
                    return t0_fn(t1)

            # in-place pointwise operations don't actually work if the in-place
            # tensor is 0-strided (numpy has the same issue)
            if (0 not in large_expanded.stride() and 0 not in large_expanded_clone.stride()):
                r1 = tensorfn_inplace(large_expanded, small_expanded, small2_expanded)
                r2 = tensorfn_inplace(large_expanded_clone, small, small2)
                self.assertEqual(r1, r2)

            def broadcastable(t0, t1, t2=None):
                try:
                    t1.expand_as(t0)
                    if t2 is not None:
                        t2.expand_as(t0)
                except RuntimeError:
                    return False
                return True

            def _test_in_place_broadcastable(t0, t1, t2=None):
                if not broadcastable(t0, t1, t2):
                    same_size = t0.numel() == t1.numel() and (t0.numel() == t2.numel() if t2 is not None else True)
                    if not same_size:
                        self.assertRaises(RuntimeError, lambda: tensorfn_inplace(t0, t1, t2))
                else:
                    tensorfn_inplace(t0, t1, t2)

            if fn not in fns_3_args and fn not in fns_value_kwarg:
                _test_in_place_broadcastable(small, large_expanded)
                _test_in_place_broadcastable(small, large)
            else:
                _test_in_place_broadcastable(small2, small_expanded, large_expanded)
                _test_in_place_broadcastable(small2, small, large)

    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
    @onlyCUDA
    @wrapDeterministicFlagAPITest
    def test_cublas_config_nondeterministic_alert(self, device):
        test_cases = [
            # (function, (tensor sizes))
            ('mm', ((2, 2), (2, 2),)),
            ('mv', ((2, 2), (2,),)),
            ('bmm', ((1, 2, 2), (1, 2, 2),))]

        test_configs = [
            # (CuBLAS workspace config, is deterministic)
            ('garbage', False),
            (None, False),
            (':4096:8', True),
            (':16:8', True)]

        cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
        is_cuda10_2_or_higher = (
            (torch.version.cuda is not None)
            and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))

        def test_case_info(fn_name, config):
            return f'function "{fn_name}" with config "{"" if config is None else config}"'

        # Create processes to test each combination of test cases and config settings
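        # A fresh subprocess is used for each case because CUBLAS_WORKSPACE_CONFIG
        # is read when cuBLAS initializes, so each configuration needs its own
        # interpreter rather than mutating os.environ in this process.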
        processes = []
        for fn_name, arg_sizes in test_cases:
            for config, is_config_deterministic in test_configs:
                env = os.environ.copy()
                if config is None:
                    if env.get(cublas_var_name) is not None:
                        del env[cublas_var_name]
                else:
                    env[cublas_var_name] = config
                should_throw_error = is_cuda10_2_or_higher and not is_config_deterministic
                script = f"""
import torch
torch.use_deterministic_algorithms(True)
fn = torch.{fn_name}
arg_sizes = {arg_sizes}
device = '{device}'
should_throw_error = {should_throw_error}
args = []
for arg_size in arg_sizes:
    args.append(torch.randn(*arg_size, device=device))
try:
    fn(*args)
except RuntimeError as e:
    if not should_throw_error:
        raise RuntimeError('Did not expect any error to be raised')
    elif 'Deterministic behavior was enabled with either' not in str(e):
        raise RuntimeError('Expected a CuBLAS nondeterministic error, but got a different error')
else:
    if should_throw_error:
        raise RuntimeError('Expected a CuBLAS nondeterministic error, but it was not raised')
"""
                try:
                    subprocess.check_output(
                        [sys.executable, '-c', script],
                        stderr=subprocess.STDOUT,
                        # On Windows, opening the subprocess with the default CWD makes `import torch`
                        # fail, so just set CWD to this script's directory
                        cwd=os.path.dirname(os.path.realpath(__file__)),
                        env=env)
                except subprocess.CalledProcessError as e:
                    self.fail(msg=(
                        f'Subprocess exception while attempting to run {test_case_info(fn_name, config)}:\n'
                        + e.output.decode("utf-8")))

    def test_nondeterministic_alert_AvgPool3d(self, device):
        module = torch.nn.AvgPool3d(3)
        input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('avg_pool3d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_AdaptiveAvgPool2d(self, device):
        module = torch.nn.AdaptiveAvgPool2d(3)
        input = torch.randn(2, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('adaptive_avg_pool2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_AdaptiveAvgPool3d(self, device):
        module = torch.nn.AdaptiveAvgPool3d(3)
        input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('adaptive_avg_pool3d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_MaxPool3d(self, device):
        module = torch.nn.MaxPool3d(3)
        input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('max_pool3d_with_indices_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_AdaptiveMaxPool2d(self, device):
        module = torch.nn.AdaptiveMaxPool2d(3)
        input = torch.randn(2, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('adaptive_max_pool2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_FractionalMaxPool2d(self, device):
        module = torch.nn.FractionalMaxPool2d(2, output_ratio=0.5)
        input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)
        @expectedAlertNondeterministic('fractional_max_pool2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_FractionalMaxPool3d(self, device):
        module = torch.nn.FractionalMaxPool3d(2, output_ratio=0.5)
        input = torch.randn(2, 3, 3, 3, 3, requires_grad=True, device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('fractional_max_pool3d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_interpolate_linear(self, device):
        input = torch.randn(1, 2, 4, device=device, requires_grad=True)
        res = torch.nn.functional.interpolate(
            input,
            size=12,
            mode='linear',
            align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('upsample_linear1d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_interpolate_bilinear(self, device):
        input = torch.randn(1, 2, 4, 4, device=device, requires_grad=True)
        res = torch.nn.functional.interpolate(
            input,
            size=12,
            mode='bilinear',
            align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('upsample_bilinear2d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_interpolate_bicubic(self, device):
        input = torch.randn(1, 2, 4, 4, device=device, requires_grad=True)
        res = torch.nn.functional.interpolate(
            input,
            size=12,
            mode='bicubic',
            align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('upsample_bicubic2d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_interpolate_trilinear(self, device):
        input = torch.randn(1, 2, 4, 4, 4, device=device, requires_grad=True)
        res = torch.nn.functional.interpolate(
            input,
            size=12,
            mode='trilinear',
            align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('upsample_trilinear3d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReflectionPad1d(self, device):
        module = torch.nn.ReflectionPad1d((1, 2))
        input = torch.randn(2, 3, 8, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('reflection_pad1d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReflectionPad2d(self, device):
        module = torch.nn.ReflectionPad2d((1, 2, 3, 4))
        input = torch.randn(2, 3, 8, 8, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('reflection_pad2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReflectionPad3d(self, device):
        module = torch.nn.ReflectionPad3d((1, 2, 3, 4, 5, 6))
        input = torch.randn(2, 3, 8, 8, 8, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('reflection_pad3d_backward_out_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReplicationPad1d(self, device):
        module = torch.nn.ReplicationPad1d((1, 2))
        input = torch.randn(2, 3, 4, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('replication_pad1d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReplicationPad2d(self, device):
        module = torch.nn.ReplicationPad2d((1, 2, 3, 4))
        input = torch.randn(2, 3, 4, 4, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('replication_pad2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_ReplicationPad3d(self, device):
        module = torch.nn.ReplicationPad3d((1, 2, 3, 4, 5, 6))
        input = torch.randn(2, 3, 4, 4, 4, device=device, requires_grad=True)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('replication_pad3d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_NLLLoss(self, device):
        module = torch.nn.NLLLoss()
        input = torch.randn(2, 3, 5, 5, device=device)
        target = torch.rand(2, 5, 5, device=device).mul(3).floor().long()

        @expectedAlertNondeterministic('SpatialClassNLLCriterion_updateOutput', 'cuda')
        def forward_func(slf, device):
            module(input, target)

        forward_func(self, device)

    def test_nondeterministic_alert_CTCLoss(self, device):
        module = torch.nn.CTCLoss()
        input = torch.randn(50, 3, 15, device=device, requires_grad=True)
        target = torch.randint(0, 14, (3, 30), device=device)
        input_lengths = [50, 50, 50]
        target_lengths = [30, 25, 20]
        res = module(input, target, input_lengths, target_lengths)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('ctc_loss_backward_gpu', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_EmbeddingBag_max(self, device):
        module = torch.nn.EmbeddingBag(
            4, 3, None, 2., False, 'max',
            _weight=torch.randn(4, 3, device=device, requires_grad=True))
        input = torch.randint(0, 3, (4, 3), device=device)
        res = module(input)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('embedding_bag_backward_cuda_max', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_scatter_add(self, device):
        def test_func(op_call):
            input = torch.randn(5, 4, device=device)
            dim = 0
            index = torch.tensor([[3]], device=device)
            src = torch.tensor([[1.0]], device=device)

            @expectedAlertNondeterministic('scatter_add_cuda_kernel', 'cuda')
            def forward_func(slf, device):
                op_call(input, dim, index, src)

            forward_func(self, device)

        test_func(torch.Tensor.scatter_add_)
        test_func(torch.Tensor.scatter_add)
        test_func(torch.scatter_add)

    @onlyOnCPUAndCUDA
    def test_nondeterministic_alert_put(self, device):
        def test_func(op_call):
            a = torch.randn(10, device=device)
            indices = torch.tensor([0, 0], device=device)
            values = torch.tensor([0., 1.], device=device)

            @expectedAlertNondeterministic('put_')
            def forward_func(slf, device):
                op_call(a, indices, values, accumulate=False)

            forward_func(self, device)

        test_func(torch.Tensor.put)
        test_func(torch.Tensor.put_)

    def test_nondeterministic_alert_put_accumulate(self, device):
        def test_func(op_call):
            a = torch.randn(10, device=device)
            indices = torch.tensor([0, 0], device=device)
            values = torch.tensor([0., 1.], device=device)

            @expectedAlertNondeterministic('put_', 'cuda')
            def forward_func(slf, device):
                op_call(a, indices, values, accumulate=True)

            forward_func(self, device)

        test_func(torch.Tensor.put)
        test_func(torch.Tensor.put_)
    def test_nondeterministic_alert_histc(self, device):
        def test_func(op_call):
            a = torch.tensor([], device=device)

            @expectedAlertNondeterministic('_histc_cuda', 'cuda')
            def forward_func(slf, device):
                res = op_call(a, min=0, max=3)

            forward_func(self, device)

        test_func(torch.histc)
        test_func(torch.Tensor.histc)

    def test_nondeterministic_alert_bincount(self, device):
        def test_func(op_call):
            a = torch.tensor([], device=device, dtype=torch.long)

            @expectedAlertNondeterministic('_bincount_cuda', 'cuda')
            def forward_func(slf, device):
                res = op_call(a)

            forward_func(self, device)

        test_func(torch.bincount)
        test_func(torch.Tensor.bincount)

    # Ensures that kthvalue throws nondeterministic alerts in the correct cases
    @dtypes(torch.double)
    def test_nondeterministic_alert_kthvalue(self, device, dtype):
        @expectedAlertNondeterministic('kthvalue CUDA', 'cuda')
        def test_func(slf, device, call_type):
            S = 10
            k = 5
            a = torch.randn(S, device=device)
            if call_type == 'function':
                torch.kthvalue(a, k)
            elif call_type == 'method':
                a.kthvalue(k)
            elif call_type == 'out':
                values = torch.empty_like(a)
                indices = torch.empty((), device=device, dtype=torch.long)
                torch.kthvalue(a, k, out=(values, indices))
            else:
                self.fail(f"'{call_type}' is not a valid call type")

        test_func(self, device, 'function')
        test_func(self, device, 'method')
        test_func(self, device, 'out')

    @onlyOnCPUAndCUDA
    def test_nondeterministic_alert_gather(self, device):
        def test_func(op_call):
            a = torch.randn(3, 3, device=device, requires_grad=True)
            dim = 0
            index = torch.tensor([[0]], device=device)
            res = op_call(a, dim, index)
            grad = torch.ones_like(res)

            @expectedAlertNondeterministic('scatter_add_cuda_kernel', 'cuda')
            def backward_func(slf, device):
                res.backward(grad)

            backward_func(self, device)

        test_func(torch.gather)
        test_func(torch.Tensor.gather)

    def test_nondeterministic_alert_grid_sample_2d(self, device):
        input = torch.empty(1, 1, 2, 2, device=device, requires_grad=True)
        grid = torch.empty(1, 1, 1, 2, device=device)
        res = torch.nn.functional.grid_sample(input, grid, align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('grid_sampler_2d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_nondeterministic_alert_grid_sample_3d(self, device):
        input = torch.empty(1, 1, 2, 2, 2, device=device, requires_grad=True)
        grid = torch.empty(1, 1, 1, 2, 3, device=device)
        res = torch.nn.functional.grid_sample(input, grid, align_corners=False)
        grad = torch.ones_like(res)

        @expectedAlertNondeterministic('grid_sampler_3d_backward_cuda', 'cuda')
        def backward_func(slf, device):
            res.backward(grad)

        backward_func(self, device)

    def test_embedding_scalar_weight_error(self, device):
        indices = torch.rand(2, 2, device=device).long()
        weights = [
            torch.tensor(1.0, device=device),
            torch.tensor(1.0, device=device).reshape(1, 1, 1),
        ]
        for weight in weights:
            with self.assertRaisesRegex(RuntimeError, "'weight' must be 2-D"):
                torch.embedding(weight, indices)
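    # test_dist below relies on the identity torch.dist(x, y, p) == torch.norm(x - y, p)
    # for each p, including the infinity norms.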
    def test_dist(self, device):
        def run_test(x, y):
            for p in [0, 1, 2, 3, 4, inf, -inf]:
                dist_xy = torch.dist(x, y, p)
                dist_xy_norm = torch.norm(x - y, p)
                self.assertEqual(dist_xy, dist_xy_norm)

        run_test(torch.randn(5, device=device), torch.randn(5, device=device))
        x = torch.zeros(3, device=device)
        y = torch.zeros(3, device=device)
        y[1] = 1.
        run_test(x, y)

    # Ensures that median throws nondeterministic alerts in the correct cases
    @dtypes(torch.double)
    def test_nondeterministic_alert_median(self, device, dtype):
        def test_func(slf, device, call_type):
            S = 10
            a = torch.randn(S, device=device)
            if call_type == 'function':
                torch.median(a)
            elif call_type == 'function with indices':
                torch.median(a, 0)
            elif call_type == 'method':
                a.median()
            elif call_type == 'method with indices':
                a.median(0)
            elif call_type == 'out with indices':
                result = torch.empty_like(a)
                indices = torch.empty((), dtype=torch.long, device=device)
                torch.median(a, 0, out=(result, indices))
            else:
                self.fail(f"'{call_type}' is not a valid call type")

        @expectedAlertNondeterministic('median CUDA with indices output', 'cuda')
        def test_func_expect_error(slf, device, call_type):
            test_func(slf, device, call_type)

        test_func(self, device, 'function')
        test_func_expect_error(self, device, 'function with indices')
        test_func(self, device, 'method')
        test_func_expect_error(self, device, 'method with indices')
        test_func_expect_error(self, device, 'out with indices')

    def _test_gather_backward_one_dim(self, device, deterministic: bool = False) -> None:
        with DeterministicGuard(deterministic):
            m = random.randint(2000, 3000)
            elems = random.randint(10 * m, 20 * m)
            dim = 0
            src = torch.randn(m, device=device, requires_grad=True)
            idx = torch.randint(m, (elems,), device=device)
            res = torch.gather(src, dim, idx)
            weight = torch.rand_like(res, device=device) * 10 ** 6
            res.backward(weight)
            grad = src.grad.detach().clone()

            if torch.device(device).type == 'cuda':
                for _ in range(2):
                    src.grad.data.zero_()
                    res = torch.gather(src, dim, idx)
                    res.backward(weight)
                    self.assertEqual(src.grad, grad, atol=0, rtol=0)
            else:
                expected = torch.zeros_like(src, device=device)
                for i in range(elems):
                    expected[idx[i]] += weight[i]
                self.assertEqual(grad, expected, atol=0, rtol=0)

    @onlyOnCPUAndCUDA
    def test_gather_backward_deterministic_path(self, device) -> None:
        self._test_gather_backward_one_dim(device, True)

    @onlyCPU
    def test_gather_backward_one_dim(self, device) -> None:
        self._test_gather_backward_one_dim(device, False)

    @onlyOnCPUAndCUDA
    def test_scatter_add_one_dim_deterministic(self, device) -> None:
        with DeterministicGuard(True):
            m = random.randint(20, 30)
            elems = random.randint(2000 * m, 3000 * m)
            dim = 0
            src = torch.randn(elems, device=device)
            idx = torch.randint(m, (elems,), device=device)

            x = torch.zeros(m, device=device)
            res = x.scatter_add(dim, idx, src)

            expected = torch.zeros(m, device=device)
            for i in range(elems):
                expected[idx[i]] += src[i]

            self.assertEqual(res, expected, atol=0, rtol=0)

    @dtypes(*torch.testing.get_all_fp_dtypes())
    def test_log_normal(self, device, dtype):
        a = torch.tensor([10], dtype=dtype, device=device).log_normal_()
        self.assertEqual(a.dtype, dtype)
        self.assertEqual(a.size(), torch.Size([1]))

    @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))
    def test_geometric(self, device, dtype):
        a = torch.tensor([10], dtype=dtype, device=device).geometric_(0.5)
        self.assertEqual(a.dtype, dtype)
        self.assertEqual(a.size(), torch.Size([1]))

    def test_repeat_interleave(self, device):
        y = torch.tensor([[1, 2], [3, 4]], device=device)
        # exercise single argument function signature
        temp = y.repeat_interleave(2)
        self.assertEqual(torch.Size([8]), temp.size())

        for dtype in [torch.int, torch.long]:
            lengths = torch.tensor([1, 2], dtype=dtype, device=device)
            output_size = torch.sum(lengths)
            a = torch.repeat_interleave(
                y,
                lengths,
                dim=0,
            )
            self.assertEqual(a.dtype, y.dtype)
            self.assertEqual(a.size(), torch.Size([3, 2]))

            a_with_output = torch.repeat_interleave(
                y,
                lengths,
                dim=0,
                output_size=output_size,
            )
            self.assertEqual(a_with_output.dtype, y.dtype)
            self.assertEqual(a_with_output.size(), torch.Size([3, 2]))

    @dtypes(*(torch.testing.get_all_fp_dtypes(include_half=False, include_bfloat16=False)))
    @dtypesIfCUDA(*(torch.testing.get_all_fp_dtypes(include_bfloat16=False)))
    def test_bernoulli_p(self, device, dtype):
        for trivial_p in ([0, 1], [1, 0, 1, 1, 0, 1]):
            x = torch.tensor(trivial_p, dtype=dtype, device=device)
            self.assertEqual(x.bernoulli().tolist(), trivial_p)

        def isBinary(t):
            return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum().item() == 0

        p = torch.rand(5, 5, dtype=dtype, device=device)
        self.assertTrue(isBinary(p.bernoulli()))

        p = torch.rand(5, dtype=dtype, device=device).expand(5, 5)
        self.assertTrue(isBinary(p.bernoulli()))

        p = torch.rand(5, 5, dtype=dtype, device=device)
        torch.bernoulli(torch.rand_like(p), out=p)
        self.assertTrue(isBinary(p))

    # RngUniform not implemented for Integral type in XLA test
    @dtypes(*(torch.testing.get_all_fp_dtypes(include_half=False, include_bfloat16=False)))
    @dtypesIfCPU(*(torch.testing.get_all_dtypes(include_half=False, include_bfloat16=False, include_complex=False)))
    @dtypesIfCUDA(*(torch.testing.get_all_dtypes(include_bfloat16=False, include_complex=False)))
    def test_bernoulli_self(self, device, dtype):

        def isBinary(t):
            return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum().item() == 0

        t = torch.empty(10, 10, dtype=dtype, device=device)

        t.fill_(2)
        t.bernoulli_(0.5)
        self.assertTrue(isBinary(t))

        for p_dtype in torch.testing.get_all_fp_dtypes(include_half=device.startswith('cuda'),
                                                       include_bfloat16=False):
            p = torch.rand(10, dtype=p_dtype, device=device).expand(10, 10)
            t.fill_(2)
            t.bernoulli_(p)
            self.assertTrue(isBinary(t))

            t.fill_(2)
            torch.bernoulli(torch.rand_like(t, dtype=p_dtype), out=t)
            self.assertTrue(isBinary(t))

            t.fill_(2)
            t.bernoulli_(torch.rand_like(t, dtype=p_dtype))
            self.assertTrue(isBinary(t))
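    # The isBinary helpers above assert every sample is exactly 0 or 1; the edge
    # case test below additionally pins down p=0 and p=1 exactly, over enough
    # samples to make any stray value visible.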
    @slowTest
    @dtypes(*(torch.testing.get_all_fp_dtypes(include_half=False, include_bfloat16=False)))
    @dtypesIfCUDA(*(torch.testing.get_all_fp_dtypes(include_bfloat16=False)))
    def test_bernoulli_edge_cases(self, device, dtype):
        # Need to draw a lot of samples to cover every random floating point number.
        a = torch.zeros(10000, 10000, dtype=dtype, device=device)  # probability of drawing "1" is 0
        num_ones = (torch.bernoulli(a) == 1).sum()
        self.assertEqual(num_ones, 0)

        b = torch.ones(10000, 10000, dtype=dtype, device=device)  # probability of drawing "1" is 1
        num_zeros = (torch.bernoulli(b) == 0).sum()
        self.assertEqual(num_zeros, 0)

    @dtypes(*torch.testing.get_all_fp_dtypes())
    def test_exponential(self, device, dtype):
        a = torch.tensor([10], dtype=dtype, device=device).exponential_(0.5)
        self.assertEqual(a.dtype, dtype)
        self.assertEqual(a.size(), torch.Size([1]))

        # Tests extremal behavior
        tests = ((-0, float('inf')), (0, float('inf')), (float('inf'), 0))
        for test in tests:
            t = torch.empty((1,), device=device, dtype=dtype).exponential_(test[0])
            self.assertTrue(t.item() == test[1])

        # Tests that negative lambda fails
        with self.assertRaises(RuntimeError):
            torch.empty((1,), device=device, dtype=dtype).exponential_(-0.5)

    @onlyCUDA
    @dtypesIfCUDA(torch.half, torch.float)
    def test_exponential_no_zero(self, device, dtype):
        # naively, 0 in exponential can be generated with probability 2^-24
        # so we need more samples to check if it's not generated
        # instead of doing one
        # don't test CPU, that would be a long test
        x = torch.empty(50000000, device=device, dtype=dtype).exponential_()
        self.assertTrue(x.min() > 0)

    @skipIfNoSciPy
    @dtypes(*torch.testing.get_all_fp_dtypes())
    def test_uniform_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for from_ in [-42, 0, 4.2]:
            for to_ in [-4.2, 0, 42]:
                if to_ > from_:
                    t = torch.empty(size, dtype=dtype, device=device).uniform_(from_, to_)
                    res = stats.kstest(t.cpu().to(torch.double), 'uniform', args=(from_, (to_ - from_)))
                    self.assertTrue(res.statistic < 0.1)

    @skipIfNoSciPy
    @dtypes(*torch.testing.get_all_fp_dtypes(include_bfloat16=False))
    @dtypesIfCUDA(*torch.testing.get_all_fp_dtypes())
    def test_normal_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for mean in [-10, 0, 50]:
            for std in [1, 5, 10]:
                t = torch.empty(size, dtype=dtype, device=device).normal_(mean=mean, std=std)
                res = stats.kstest(t.cpu().to(torch.double), 'norm', args=(mean, std))
                self.assertTrue(res.statistic < 0.1)

    @skipIfNoSciPy
    @dtypes(*torch.testing.get_all_fp_dtypes())
    def test_lognormal_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for mean in [-3, 0, 7]:
            for std in [1, 5, 7]:
                t = torch.empty(size, dtype=dtype, device=device).log_normal_(mean=mean, std=std)
                res = stats.kstest(t.cpu().to(torch.double), 'lognorm', args=(std, 0, math.exp(mean)))
                if dtype == torch.half:
                    self.assertTrue(res.statistic < 0.3)
                else:
                    self.assertTrue(res.statistic < 0.1)

    @skipIfNoSciPy
    @dtypes(*torch.testing.get_all_fp_dtypes())
    def test_exponential_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for lambd in [0.5, 1.0, 5.0]:
            t = torch.empty(size, dtype=dtype, device=device).exponential_(lambd=lambd)
            res = stats.kstest(t.cpu().to(torch.double), 'expon', args=(0, 1 / lambd,))
            self.assertTrue(res.statistic < 0.1)

    @skipIfNoSciPy
    @dtypes(*torch.testing.get_all_fp_dtypes())
    def test_cauchy_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for median in [-10, 0, 50]:
            for sigma in [0.5, 1.0, 10.0]:
                t = torch.empty(size, dtype=dtype, device=device).cauchy_(median=median, sigma=sigma)
                res = stats.kstest(t.cpu().to(torch.double), 'cauchy', args=(median, sigma))
                self.assertTrue(res.statistic < 0.1)
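    # The Kolmogorov-Smirnov checks above use a loose statistic bound (< 0.1)
    # as a sanity test rather than a strict goodness-of-fit criterion; the
    # geometric case below is discrete, so a chi-square test is used instead.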
    @skipIfNoSciPy
    @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))
    def test_geometric_kstest(self, device, dtype):
        from scipy import stats
        size = 1000
        for p in [0.2, 0.5, 0.8]:
            t = torch.empty(size, dtype=dtype, device=device).geometric_(p=p)
            actual = np.histogram(t.cpu().to(torch.double), np.arange(1, 100))[0]
            expected = stats.geom(p).pmf(np.arange(1, 99)) * size
            res = stats.chisquare(actual, expected)
            self.assertEqual(res.pvalue, 1.0, atol=0.1, rtol=0)

    def test_pairwise_distance_empty(self, device):
        shape = (2, 0)
        x = torch.randn(shape, device=device)
        y = torch.randn(shape, device=device)

        self.assertEqual(torch.zeros(2, device=device), torch.pairwise_distance(x, y))
        self.assertEqual(torch.zeros((2, 1), device=device), torch.pairwise_distance(x, y, keepdim=True))

        shape = (0, 2)
        x = torch.randn(shape, device=device)
        y = torch.randn(shape, device=device)
        self.assertEqual(torch.zeros(0, device=device), torch.pairwise_distance(x, y))
        self.assertEqual(torch.zeros((0, 1), device=device), torch.pairwise_distance(x, y, keepdim=True))

    def test_pdist_empty(self, device):
        shape = (0, 2)
        x = torch.randn(shape, device=device)
        self.assertEqual(torch.empty(0, device=device), torch.pdist(x))

        shape = (1, 2)
        x = torch.randn(shape, device=device)
        self.assertEqual(torch.empty(0, device=device), torch.pdist(x))

        shape = (3, 0)
        x = torch.randn(shape, device=device)
        self.assertEqual(torch.zeros(3, device=device), torch.pdist(x))

    def test_cdist_empty(self, device):
        x = torch.randn((0, 5), device=device)
        y = torch.randn((4, 5), device=device)
        self.assertEqual(torch.empty(0, 4, device=device), torch.cdist(x, y))

        x = torch.randn((2, 5), device=device)
        y = torch.randn((0, 5), device=device)
        self.assertEqual(torch.empty(2, 0, device=device), torch.cdist(x, y))

        x = torch.randn((2, 0), device=device)
        y = torch.randn((3, 0), device=device)
        self.assertEqual(torch.zeros(2, 3, device=device), torch.cdist(x, y))

        x = torch.randn((2, 0), device=device)
        y = torch.randn((0, 0), device=device)
        self.assertEqual(torch.empty(2, 0, device=device), torch.cdist(x, y))

    def _brute_cdist(self, x, y, p=2):
        r1 = x.shape[-2]
        r2 = y.shape[-2]
        if r1 == 0 or r2 == 0:
            return torch.empty(r1, r2, device=x.device)
        return torch.norm(x[..., None, :] - y[..., None, :, :], p=p, dim=-1)

    def test_cdist_norm(self, device):
        for r1 in [3, 4, 5, 6]:
            for m in [2, 3, 4, 10]:
                for r2 in [4, 6, 7, 8]:
                    for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                        x = torch.randn(r1, m, device=device)
                        y = torch.randn(r2, m, device=device)
                        if p == 2:
                            for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
                                actual = torch.cdist(x, y, p=2, compute_mode=cm)
                                expected = self._brute_cdist(x, y, p=2)
                                self.assertEqual(expected, actual, rtol=0, atol=0.02)
                        else:
                            actual = torch.cdist(x, y, p=p)
                            expected = self._brute_cdist(x, y, p=p)
                            self.assertEqual(expected, actual)

    def test_cdist_norm_batch(self, device):
        for r1 in [3, 4, 5, 6]:
            for m in [2, 3, 4, 10]:
                for r2 in [4, 6, 7, 8]:
                    for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                        x = torch.randn(2, 3, 6, r1, m, device=device)
                        y = torch.randn(2, 3, 6, r2, m, device=device)
                        if p == 2:
                            for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
                                actual = torch.cdist(x, y, p=2, compute_mode=cm)
                                expected = self._brute_cdist(x, y, p=2)
                                self.assertEqual(expected, actual, rtol=0, atol=0.02)
                        else:
                            actual = torch.cdist(x, y, p=p)
                            expected = self._brute_cdist(x, y, p=p)
                            self.assertEqual(expected, actual)
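    # The backward test below compares cdist gradients against the broadcasting
    # _brute_cdist reference defined above, within a loose absolute tolerance.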
torch.randn(4, l2, 32, device=device, requires_grad=True) y2 = y1.clone().detach_().requires_grad_() if p == 2: for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: z1 = torch.cdist(x1, y1, p=2, compute_mode=cm).mean() z2 = self._brute_cdist(x2, y2, p=2).mean() z1.backward() z2.backward() self.assertEqual(x1.grad, x2.grad, rtol=0, atol=0.001) self.assertEqual(y1.grad, y2.grad, rtol=0, atol=0.001) else: z1 = torch.cdist(x1, y1, p=p).mean() z2 = self._brute_cdist(x2, y2, p=p).mean() self.assertEqual(x1.grad, x2.grad, rtol=0, atol=0.001) self.assertEqual(y1.grad, y2.grad, rtol=0, atol=0.001) @tf32_on_and_off(0.005) def test_cdist_large(self, device): for cm in ['use_mm_for_euclid_dist_if_necessary', 'use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: x = torch.randn(1000, 10, device=device) y = torch.randn(1000, 10, device=device) actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertEqual(expected, actual) @slowTest @tf32_on_and_off(0.01) def test_cdist_large_batch(self, device): for cm in ['use_mm_for_euclid_dist_if_necessary', 'use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: x = torch.randn(4, 3, 1000, 10, device=device) y = torch.randn(4, 3, 1000, 10, device=device) actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertEqual(expected, actual) @tf32_on_and_off(0.005) def test_cdist_non_contiguous(self, device): for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: x = torch.randn(5, 7, device=device).transpose(-1, -2) y = torch.randn(5, 3, device=device).transpose(-1, -2) actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertFalse(x.is_contiguous()) self.assertFalse(y.is_contiguous()) self.assertEqual(expected, actual) x = torch.randn(7, 5, device=device) y = torch.randn(5, 3, device=device).t() actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertTrue(x.is_contiguous()) self.assertFalse(y.is_contiguous()) self.assertEqual(expected, actual) x = torch.randn(5, 7, device=device).t() y = torch.randn(3, 5, device=device) actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertFalse(x.is_contiguous()) self.assertTrue(y.is_contiguous()) self.assertEqual(expected, actual) @tf32_on_and_off() def test_cdist_non_contiguous_batch(self, device): for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: x = torch.randn(4, 3, 2, 5, 7, device=device).transpose(-1, -2) y = torch.randn(4, 3, 2, 5, 3, device=device).transpose(-1, -2) actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertFalse(x.is_contiguous()) self.assertFalse(y.is_contiguous()) self.assertEqual(expected, actual) x = torch.randn(7, 2, 7, 5, device=device) y = torch.randn(7, 2, 5, 3, device=device).transpose(-1, -2) actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertTrue(x.is_contiguous()) self.assertFalse(y.is_contiguous()) self.assertEqual(expected, actual) x = torch.randn(4, 5, 7, device=device).transpose(-1, -2) y = torch.randn(4, 3, 5, device=device) actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertFalse(x.is_contiguous()) self.assertTrue(y.is_contiguous()) self.assertEqual(expected, actual) def test_multinomial_constraints(self, device): x = torch.empty(1, 2, 3, dtype=torch.double, device=device) 
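    # A minimal sketch (illustrative only, not part of the suite) of the
    # broadcasting trick _brute_cdist relies on: inserting singleton dims turns
    # x[..., None, :] - y[..., None, :, :] into an (r1, r2, m) difference grid,
    # so a p-norm over the last dim yields all pairwise distances at once.
    #
    #   x = torch.randn(3, 4)                      # r1=3 rows, m=4 features
    #   y = torch.randn(5, 4)                      # r2=5 rows
    #   diff = x[:, None, :] - y[None, :, :]       # shape (3, 5, 4)
    #   dist = diff.norm(p=2, dim=-1)              # shape (3, 5)
    #   assert torch.allclose(dist, torch.cdist(x, y, p=2), atol=1e-6)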
    def test_multinomial_constraints(self, device):
        x = torch.empty(1, 2, 3, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "prob_dist must be 1 or 2 dim",
            lambda: torch.multinomial(x, 2))
        x = torch.empty(1, 2, dtype=torch.long, device=device)
        self.assertRaisesRegex(
            RuntimeError, "multinomial only supports floating-point dtypes for input",
            lambda: torch.multinomial(x, 2))
        x = torch.empty(1, 2, dtype=torch.double, device=device)
        y = torch.empty(1, 2, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "multinomial expects Long tensor out",
            lambda: torch.multinomial(x, 2, out=y))
        x = torch.empty(2, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "cannot sample n_sample <= 0 samples",
            lambda: torch.multinomial(x, 0))
        x = torch.empty(2, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "cannot sample n_sample <= 0 samples",
            lambda: torch.multinomial(x, -1))
        x = torch.empty(2, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "cannot sample n_sample > prob_dist",
            lambda: torch.multinomial(x, 3, False))
        x = torch.empty(16777217, dtype=torch.double, device=device)
        self.assertRaisesRegex(
            RuntimeError, "number of categories cannot exceed",
            lambda: torch.multinomial(x, 3))

    def test_cumsum(self, device):
        x = torch.rand(100, 100, device=device)
        res1 = torch.cumsum(x, 1)
        res2 = torch.tensor([]).to(device)
        torch.cumsum(x, 1, out=res2)
        self.assertEqual(res1, res2)
        x.cumsum_(1)
        self.assertEqual(res1, x)

        a = torch.tensor([[True, False, True],
                          [False, False, False],
                          [True, True, True]], device=device)
        b = a.byte()
        aRes = torch.cumsum(a, 0)
        bRes = torch.cumsum(b, 0)
        self.assertEqual(aRes, bRes)
        self.assertEqual(aRes, torch.tensor([[1, 0, 1],
                                             [1, 0, 1],
                                             [2, 1, 2]]))

        aRes = torch.cumsum(a, 1)
        bRes = torch.cumsum(b, 1)
        self.assertEqual(aRes, bRes)
        self.assertEqual(aRes, torch.tensor([[1, 1, 2],
                                             [0, 0, 0],
                                             [1, 2, 3]]))

        # Check that cumulative sum over a zero length dimension doesn't crash on backprop.
        # Also check that cumsum over other dimensions in a tensor with a zero-length
        # dimension also works.
        # Also include a basic suite of similar tests for other base cases.
        shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]]
        for shape in shapes:
            for dim in range(len(shape)):
                raw_tensor = torch.zeros(*shape, requires_grad=True)
                integrated = raw_tensor.cumsum(dim=dim)
                # Check that backward does not crash
                integrated.sum().backward()
                # Check that output maintained correct shape
                self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

        # Check a scalar example
        raw_tensor = torch.tensor(3., requires_grad=True)
        integrated = raw_tensor.cumsum(dim=-1)
        self.assertEqual(raw_tensor, integrated)
        # Check that backward does not crash
        integrated.sum().backward()
        # Check that output maintained correct shape
        self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

    def test_cumprod(self, device):
        x = torch.rand(100, 100, device=device)
        res1 = torch.cumprod(x, 1)
        res2 = torch.tensor([]).to(device)
        torch.cumprod(x, 1, out=res2)
        self.assertEqual(res1, res2)
        x.cumprod_(1)
        self.assertEqual(res1, x)

        a = torch.tensor([[True, False, True],
                          [False, False, False],
                          [True, True, True]], dtype=torch.bool, device=device)
        b = a.byte()
        aRes = torch.cumprod(a, 0)
        bRes = torch.cumprod(b, 0)
        self.assertEqual(aRes, bRes)
        self.assertEqual(aRes, torch.tensor([[1, 0, 1],
                                             [0, 0, 0],
                                             [0, 0, 0]]))

        aRes = torch.cumprod(a, 1)
        bRes = torch.cumprod(b, 1)
        self.assertEqual(aRes, bRes)
        self.assertEqual(aRes, torch.tensor([[1, 0, 0],
                                             [0, 0, 0],
                                             [1, 1, 1]]))

        # Check that cumulative prod over a zero length dimension doesn't crash on backprop.
        # Also check that cumprod over other dimensions in a tensor with a zero-length
        # dimension also works.
        # Also include a basic suite of similar tests for other base cases.
        shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]]
        for shape in shapes:
            for dim in range(len(shape)):
                raw_tensor = torch.zeros(*shape, requires_grad=True)
                integrated = raw_tensor.cumprod(dim=dim)
                # Check that backward does not crash
                integrated.sum().backward()
                # Check that output maintained correct shape
                self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

        # Check a scalar example
        raw_tensor = torch.tensor(3., requires_grad=True)
        integrated = raw_tensor.cumprod(dim=-1)
        self.assertEqual(raw_tensor, integrated)
        # Check that backward does not crash
        integrated.sum().backward()
        # Check that output maintained correct shape
        self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)
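    # A small illustrative sketch (mine, not part of the original suite) of the
    # bool/byte equivalence the tests above verify: cumulative ops promote
    # torch.bool to an integer result, so cumsum on a bool tensor counts the
    # running number of True values and matches cumsum on its .byte() view.
    #
    #   flags = torch.tensor([True, False, True, True])
    #   assert torch.equal(flags.cumsum(0), torch.tensor([1, 1, 2, 3]))
    #   assert torch.equal(flags.cumsum(0), flags.byte().cumsum(0))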
    def test_cummax_cummin(self, device):
        def test_ops(op, string_of_function_name, expected_output1, expected_output2):
            x = torch.rand(100, 100, device=device)
            out1 = op(x, 1)
            res2 = torch.empty(0, device=device)
            indices2 = torch.empty(0, dtype=torch.int64, device=device)
            op(x, 1, out=(res2, indices2))
            self.assertEqual(out1[0], res2)
            self.assertEqual(out1[1], indices2)

            a = torch.tensor([[True, False, True],
                              [False, False, False],
                              [True, True, True]], dtype=torch.bool, device=device)
            b = a.byte()
            aRes = op(a, 0)
            bRes = op(b, 0)
            self.assertEqual(aRes[0], bRes[0].bool())
            self.assertEqual(aRes[0], expected_output1.bool())

            # test inf and nan input
            x = torch.tensor([4, inf, 1.5, -inf, 0, nan, 1])
            xRes = op(x, 0)[0]
            self.assertEqual(xRes, expected_output2)

            # op shouldn't support values, indices with a dtype, device type or layout
            # different from that of input tensor
            t = torch.randn(10)
            values = torch.empty(0, dtype=torch.int16)
            indices = torch.empty(0, dtype=torch.int64)
            with self.assertRaisesRegex(
                    RuntimeError,
                    'expected scalar_type Float but found Short'):
                op(t, 0, out=(values, indices))

            # Check that op over a zero length dimension doesn't crash on backprop.
            # Also check that op over other dimensions in a tensor with a zero-length
            # dimension also works.
            # Also include a basic suite of similar tests for other base cases.
            shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]]
            for shape in shapes:
                for dim in range(len(shape)):
                    raw_tensor = torch.zeros(*shape, requires_grad=True)
                    integrated = getattr(raw_tensor, string_of_function_name)(dim=dim)
                    # Check that backward does not crash
                    integrated[0].sum().backward()
                    # Check that output maintained correct shape
                    self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

            # Check a scalar example
            raw_tensor = torch.tensor(3., requires_grad=True)
            integrated = getattr(raw_tensor, string_of_function_name)(dim=-1)
            # Check that backward does not crash
            integrated[0].sum().backward()
            # Check that output maintained correct shape
            self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

        expected_out = torch.tensor([4, inf, inf, inf, inf, nan, nan])
        test_ops(torch.cummax, "cummax",
                 torch.tensor([[1, 0, 1],
                               [1, 0, 1],
                               [1, 1, 1]]), expected_out)

        expected_out = torch.tensor([4, 4, 1.5, -inf, -inf, nan, nan])
        test_ops(torch.cummin, "cummin",
                 torch.tensor([[1, 0, 1],
                               [0, 0, 0],
                               [0, 0, 0]]), expected_out)

    def test_logcumsumexp(self, device):
        def logcumsumexp(a, axis):
            return torch.cumsum(a.exp(), axis=axis).log_()

        axis = -1
        a = torch.randn(100, 100, device=device)

        actual = a.logcumsumexp(axis)
        expected = logcumsumexp(a, axis)
        self.assertEqual(a.dtype, actual.dtype)
        self.assertEqual(expected.shape, actual.shape)
        self.assertEqual(expected, actual)

        # check -inf and nan handling
        x = torch.tensor([-float('inf'), -float('inf'), 1.0, 1.0, float('inf'),
                          float('inf'), float('nan'), 1.0, 1.0], device=device)
        x2d = x.unsqueeze(0).expand(2, -1)

        for inp in (x, x2d):
            actual = inp.logcumsumexp(axis)
            expected = logcumsumexp(inp, axis)
            self.assertEqual(expected, actual)

        # Check that out is actually inplace
        b = torch.randn(5, 2, device=device)
        inplace_out = torch.zeros(5, 2, device=device)

        expected = logcumsumexp(b, axis)
        torch.logcumsumexp(b, axis=axis, out=inplace_out)

        self.assertEqual(inplace_out, expected)

        # Check input and inplace_output type mismatch
        b = torch.randn(5, 2, device=device, dtype=torch.float64)
        inplace_out = torch.zeros(5, 2, device=device, dtype=torch.float32)
        with self.assertRaisesRegex(
                RuntimeError,
                'expected scalar_type Double but found Float'):
            torch.logcumsumexp(b, axis, out=inplace_out)

    def _test_diff_numpy(self, t, dims=None):
        # Helper for test_diff to compare with NumPy reference implementation
        def to_np(t):
            if t.dtype == torch.bfloat16:
                return t.to(dtype=torch.float, device="cpu").numpy()
            else:
                return t.cpu().numpy()

        for dim in dims if dims else range(t.dim()):
            prepend = t.narrow(dim, 0, 1)
            append = t.narrow(dim, 0, 1)
            np_t = to_np(t)

            # test when prepend and append's size along dim is 1
            actual = torch.diff(t, dim=dim, prepend=prepend, append=append)
            expected = torch.from_numpy(np.diff(np_t, axis=dim, prepend=to_np(prepend), append=to_np(append)))
            self.assertEqual(actual, expected.to(t.dtype))

            # test when prepend and append's size along dim != 1
            actual = torch.diff(t, dim=dim, prepend=t, append=t)
            expected = torch.from_numpy(np.diff(np_t, axis=dim, prepend=np_t, append=np_t))
            self.assertEqual(actual, expected.to(t.dtype))

    # All tensors appear contiguous on XLA
    @onlyOnCPUAndCUDA
    @dtypes(*torch.testing.get_all_dtypes())
    def test_diff_noncontig(self, device, dtype):
        shapes = (
            (1,),
            (1, 5),
            (3, 5),
            (1, 5, 1),
            (2, 3, 5))

        for shape in shapes:
            contig = make_tensor(shape, device, dtype, low=-9, high=9)

            non_contig = torch.empty(shape + (2, 2), device=device, dtype=dtype)[..., 0]
            non_contig = non_contig.select(-1, -1)
            non_contig.copy_(contig)
            self.assertTrue(not non_contig.is_contiguous() or shape == (1,))

            self._test_diff_numpy(non_contig)

    # RngNormal not implemented for type f16 for XLA
    @dtypes(*torch.testing.get_all_dtypes(include_half=False))
    @dtypesIfCPU(*torch.testing.get_all_dtypes())
    @dtypesIfCUDA(*torch.testing.get_all_dtypes())
    def test_diff(self, device, dtype):
        shapes = (
            (1,),
            (1, 5),
            (3, 5),
            (1, 5, 1),
            (2, 3, 5))

        for shape in shapes:
            contig = make_tensor(shape, device, dtype, low=-9, high=9)
            self._test_diff_numpy(contig)

        t = torch.ones(2, 3)

        with self.assertRaisesRegex(
                RuntimeError, 'diff expects prepend or append to be the same dimension as input'):
            invalid_prepend = torch.tensor([1, 2, 3], device=device, dtype=dtype)
            t.diff(dim=0, prepend=invalid_prepend)

        with self.assertRaisesRegex(
                RuntimeError, 'diff expects the shape of tensor to prepend or append to match that of input'):
            invalid_prepend = torch.tensor([[0, 1]], device=device, dtype=dtype)
            t.diff(dim=0, prepend=invalid_prepend)

        with self.assertRaisesRegex(
                RuntimeError, 'diff only supports n = 1 currently'):
            torch.diff(t, n=2)

        with self.assertRaisesRegex(
                RuntimeError, 'diff expects input to be at least one-dimensional'):
            scalar = torch.tensor(2, device=device, dtype=dtype)
            torch.diff(scalar)

    # if the given input arg is not a list, wrap it in a single-element list: [arg]
    def _wrap_to_list(self, input_array):
        return input_array if isinstance(input_array, list) else [input_array]
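    # A minimal sketch (illustrative only, not part of the original suite) of
    # the first-difference semantics that test_diff exercises: with n=1 and no
    # prepend/append, torch.diff is just adjacent-element subtraction.
    #
    #   a = torch.tensor([1., 4., 9., 16.])
    #   assert torch.equal(torch.diff(a), a[1:] - a[:-1])   # [3., 5., 7.]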
    # To ensure inf, -inf, and nan values do not cause divergence between NumPy and PyTorch.
    # There are two types of possible divergence:
    # 1. When a and b are both real numbers with very small absolute values (i.e. very near 0.0),
    #    the result of a/b can be inf, -inf or nan, which causes divergence.
    # 2. When we are dividing complex numbers by zero. For example, when a = torch.tensor(3+5j),
    #    a/0 is equal to nan + nan*j in PyTorch and inf + inf*j in NumPy.
    def _inf_nan_preprocess(self, actual, expected):
        for i in range(len(expected)):
            expected[i] = np.nan_to_num(expected[i], nan=nan, posinf=nan, neginf=nan)
            # nan_to_num is not defined for complex tensors in PyTorch.
            if actual[i].dtype == torch.complex64:
                actual[i].real = torch.nan_to_num(actual[i].real, nan=nan, posinf=nan, neginf=nan)
                actual[i].imag = torch.nan_to_num(actual[i].imag, nan=nan, posinf=nan, neginf=nan)
            else:
                actual[i] = torch.nan_to_num(actual[i], nan=nan, posinf=nan, neginf=nan)

        return actual, expected

    @onlyOnCPUAndCUDA
    @dtypes(torch.long, torch.float32, torch.complex64)
    def test_gradient_all(self, device, dtype):
        def create_scalar(shape):
            return make_tensor((1,), device='cpu', dtype=dtype, low=1.).item()

        def create_list(shape):
            return make_tensor((len(shape),), device='cpu', dtype=dtype, low=1.).tolist()

        def create_coordinate_tensors(shape):
            tensor_list = []
            for i in range(len(shape)):
                tensor_list.append(make_tensor((shape[i],), device=device, dtype=dtype))
            return tensor_list

        def filter_shape(shape, dim):
            filtered_shape = []
            for i in range(len(dim)):
                filtered_shape.append(shape[dim[i]])
            return filtered_shape

        # shape, dims format
        test_cases = (
            ((5,), (0,)),
            ((4, 4), (0, 1)),
            ((3, 3, 3), (-1, 0)),
            ((4, 4, 4), (2,)),
            ((4, 4, 4), (0, 1)),
            ((4, 4, 4, 3), (0, 2, 3)),
            ((4, 5, 3, 4, 3), (1, 2)),
            ((4, 3, 6, 5, 3), (2, 4)),
            ((4, 3, 3, 5, 3), (0, 1, 2, 3, 4)),
        )

        for case, contig, edge_order, space_fn in product(test_cases, [True, False], [1, 2],
                                                          (create_scalar, create_list, create_coordinate_tensors)):
            shape, dims = case
            # filter shape by dims before passing filtered shape to create_* functions
            filtered_shape = filter_shape(shape, dims)

            spacing = space_fn(filtered_shape)
            t = make_tensor(shape, device=device, dtype=dtype, noncontiguous=not contig)
            t_np = t.cpu().numpy()

            actual = torch.gradient(t, spacing=spacing, dim=dims, edge_order=edge_order)
            if space_fn == create_coordinate_tensors and spacing[0].device != 'cpu':
                spacing = [space.cpu().detach().numpy() for space in spacing]
            expected = np.gradient(t_np, *self._wrap_to_list(spacing), axis=dims, edge_order=edge_order)
            actual, expected = self._inf_nan_preprocess(list(actual), self._wrap_to_list(expected))
            self.assertEqual(actual, expected, equal_nan="relaxed", atol=1e-4, rtol=0, exact_dtype=False)

    @onlyOnCPUAndCUDA
    @dtypes(torch.long, torch.float32, torch.complex64)
    def test_gradient_extreme_cases(self, device, dtype):
        # Test behaviour for inf and nan values
        actual = torch.gradient(torch.tensor([2, -2, inf, inf, -inf, -inf, inf, 3, -inf, 2, nan, nan, 3, inf, nan]))
        expected = np.gradient(np.array([2, -2, inf, inf, -inf, -inf, inf, 3, -inf, 2, nan, nan, 3, inf, nan]))
        self.assertEqual(actual, self._wrap_to_list(expected), exact_dtype=False)

        # Test behaviour in very big tensors
        large_size = 100000
        t = make_tensor((large_size,), device, dtype)
        t_np = t.cpu().numpy()
        coordinates_np = list(np.random.randn(large_size))
        coordinates = [torch.tensor(coordinates_np, device=device)]
        actual = torch.gradient(t, spacing=coordinates, dim=0, edge_order=1)
        expected = [np.gradient(t_np, coordinates_np, axis=0, edge_order=1)]
        self.assertEqual(actual, expected, exact_dtype=False)

        actual = torch.gradient(t, spacing=coordinates, dim=0, edge_order=2)
        expected = [np.gradient(t_np, coordinates_np, axis=0, edge_order=2)]
        self.assertEqual(actual, expected, exact_dtype=False)

    @onlyOnCPUAndCUDA
    def test_gradient_type_promotion(self, device):
        inputs = (
            make_tensor((4, 4), device=device, dtype=torch.float32),
            make_tensor((4, 4), device=device, dtype=torch.complex64),
            make_tensor((4, 4), device=device, dtype=torch.int64),
        )

        spacing = (
            make_tensor((1,), device='cpu', dtype=torch.float32).item(),
            make_tensor((1,), device='cpu', dtype=torch.int64).item(),
            make_tensor((1,), device='cpu', dtype=torch.complex64).item(),
            make_tensor((2,), device='cpu', dtype=torch.float32, low=0.1).tolist(),
            make_tensor((2,), device='cpu', dtype=torch.int64, low=1).tolist(),
            make_tensor((2,), device='cpu', dtype=torch.complex64).tolist(),
            [make_tensor((4,), device=device, dtype=torch.float32),
             make_tensor((4,), device=device, dtype=torch.float32)],
            [make_tensor((4,), device=device, dtype=torch.int64),
             make_tensor((4,), device=device, dtype=torch.int64)],
            [make_tensor((4,), device=device, dtype=torch.complex64),
             make_tensor((4,), device=device, dtype=torch.complex64)],
        )

        for input, spacing_or_coord, edge_order in product(inputs, spacing, [1, 2]):
            input_np = input.cpu().numpy()
            actual = torch.gradient(input, spacing=spacing_or_coord, dim=(0, 1), edge_order=edge_order)
            spacing_or_coord_wrapped = self._wrap_to_list(spacing_or_coord)
            spacing_or_coord_np = []
            if torch.is_tensor(spacing_or_coord_wrapped[0]) and torch.device(spacing_or_coord_wrapped[0].device).type != 'cpu':
                for i in range(len(spacing_or_coord_wrapped)):
                    spacing_or_coord_np.append(spacing_or_coord_wrapped[i].detach().clone().cpu().numpy())
            else:
                spacing_or_coord_np = spacing_or_coord_wrapped
            expected = np.gradient(input_np, *spacing_or_coord_np, axis=(0, 1), edge_order=edge_order)
            if actual[0].dtype == torch.complex64 and input.dtype != torch.complex64:
                for i in range(len(actual)):
                    self.assertEqual(actual[i].real, expected[i].real, exact_dtype=False)
                    # Type promotion fails on NumPy when spacing is given as a complex number and
                    # input is given as real: the result is returned as a real number, so all the
                    # imaginary parts are expected to be equal to zero.
                    self.assertEqual(expected[i].imag, torch.zeros(actual[i].shape), exact_dtype=False)
            else:
                actual, expected = self._inf_nan_preprocess(list(actual), expected)
                self.assertEqual(actual, expected, equal_nan="relaxed", exact_dtype=False)

    @onlyOnCPUAndCUDA
    @dtypes(torch.long, torch.float32, torch.complex64)
    def test_error_gradient(self, device, dtype):
        t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, 'torch.gradient expected spacing to be unspecified, a scalar '):
            dim = (1, 0)
            spacing = [0.1]
            torch.gradient(t, spacing=spacing, dim=dim, edge_order=1)

        with self.assertRaisesRegex(RuntimeError, 'torch.gradient only supports edge_order=1 and edge_order=2.'):
            torch.gradient(t, edge_order=3)

        with self.assertRaisesRegex(RuntimeError, 'dim 1 appears multiple times in the list of dims'):
            dim = (1, 1)
            spacing = 0.1
            torch.gradient(t, spacing=spacing, dim=dim, edge_order=1)

        with self.assertRaisesRegex(RuntimeError, 'torch.gradient expected each tensor to be on the same device,'):
            dim = (0, 1)
            coordinates = [torch.tensor([1, 2, 4], device='cpu'), torch.tensor([1, 2, 4], device='meta')]
            torch.gradient(t, spacing=coordinates, dim=dim, edge_order=1)

        with self.assertRaises(IndexError):
            torch.gradient(t, dim=3)

        with self.assertRaisesRegex(RuntimeError, 'torch.gradient expected each dimension size to be at least'):
            torch.gradient(torch.tensor([[1], [2], [3]]), edge_order=1)

        with self.assertRaisesRegex(RuntimeError, 'torch.gradient expected each dimension size to be at least'):
            torch.gradient(torch.tensor([[1, 2], [3, 4]]), edge_order=2)

    def _test_large_cum_fn_helper(self, x, fn):
        x_cpu = x.cpu().float()
        expected = fn(x_cpu)
        actual = fn(x).cpu().float()
        self.assertEqual(expected, actual.cpu().float())
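    # A rough illustrative note (my own summary, not from the suite) on what
    # edge_order means in torch.gradient: interior points always use central
    # differences, while edge_order picks first- or second-order one-sided
    # differences at the boundaries. For uniform spacing h:
    #
    #   interior:      (f[i + 1] - f[i - 1]) / (2 * h)
    #   edge_order=1:  (f[1] - f[0]) / h                        # at the left edge
    #   edge_order=2:  (-3 * f[0] + 4 * f[1] - f[2]) / (2 * h)  # at the left edge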
    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "sandcastle OOM with current tpx gpu/re configuration")
    @onlyCUDA
    @dtypesIfCUDA(torch.half)  # only small dtype not to get oom
    def test_large_cumsum(self, device, dtype):
        # initialization to avoid overflow and half caveats
        x = torch.empty(2**30 + 200, device=device, dtype=dtype)
        x[::3] = -3
        x[1::3] = 2
        x[2::3] = 1
        self._test_large_cum_fn_helper(x, lambda x: torch.cumsum(x, 0))

    @onlyCUDA
    @dtypesIfCUDA(torch.half)  # only small dtype not to get oom
    def test_large_cumprod(self, device, dtype):
        # initialization to avoid overflow and half caveats
        x = torch.empty(2**30 + 200, device=device, dtype=dtype)
        x[::3] = 8
        x[1::3] = .25
        x[2::3] = .5
        self._test_large_cum_fn_helper(x, lambda x: torch.cumprod(x, 0))

    def test_discontiguous_out_cumsum(self, device):
        x = torch.randn(4, 8, device=device)
        y = torch.empty(4, 16, device=device)[:, ::2]
        out = torch.cumsum(x, 0)
        torch.cumsum(x, 0, out=y)
        self.assertFalse(y.is_contiguous())
        self.assertEqual(out, y, atol=0., rtol=0.)

    def _test_cumminmax_helper(self, x, fn, expected_val, expected_ind):
        val, ind = fn(x, -1)
        self.assertEqual(val, expected_val, atol=0, rtol=0)
        self.assertEqual(ind, expected_ind, atol=0, rtol=0)
        out_val = torch.empty_like(val).t().contiguous().t()
        out_ind = torch.empty_like(ind).t().contiguous().t()
        fn(x, -1, out=(out_val, out_ind))
        self.assertFalse(out_val.is_contiguous())
        self.assertFalse(out_ind.is_contiguous())
        self.assertEqual(out_val, expected_val, atol=0, rtol=0)
        self.assertEqual(out_ind, expected_ind, atol=0, rtol=0)

    def test_cummax_discontiguous(self, device):
        x = torch.tensor([[0, 1, 2, 3, 2, 1], [4, 5, 6, 5, 6, 7]], device=device, dtype=torch.float).t().contiguous().t()
        expected_val = torch.tensor([[0, 1, 2, 3, 3, 3], [4, 5, 6, 6, 6, 7]], device=device, dtype=torch.float)
        expected_ind = torch.tensor([[0, 1, 2, 3, 3, 3], [0, 1, 2, 2, 4, 5]], device=device, dtype=torch.long)
        self._test_cumminmax_helper(x, torch.cummax, expected_val, expected_ind)

    def test_cummin_discontiguous(self, device):
        x = torch.tensor([[3, 2, 1, 0, 1, 2], [7, 6, 5, 4, 5, 2]], device=device, dtype=torch.float).t().contiguous().t()
        expected_val = torch.tensor([[3, 2, 1, 0, 0, 0], [7, 6, 5, 4, 4, 2]], device=device, dtype=torch.float)
        expected_ind = torch.tensor([[0, 1, 2, 3, 3, 3], [0, 1, 2, 3, 3, 5]], device=device, dtype=torch.long)
        self._test_cumminmax_helper(x, torch.cummin, expected_val, expected_ind)

    def test_bool_tensor_value_change(self, device):
        x = torch.tensor([True, False], dtype=torch.bool, device=device)
        x[0] = False
        x[1] = True
        self.assertEqual(x, torch.tensor([False, True], dtype=torch.bool, device=device))

    def test_unfold_all_devices_and_dtypes(self, device):
        for dt in torch.testing.get_all_dtypes():
            if dt == torch.bool:
                x = torch.empty((0, 1, 3, 0), dtype=dt, device=device)
                self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape)
            else:
                x = torch.empty((0, 1, 3, 0), dtype=dt, device=device)
                self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape)

    def test_unfold_scalars(self, device):
        x = torch.tensor(0.5, device=device)
        # unfold on a 0-dimensional tensor should always return a 1-dimensional
        # tensor of shape [size] (i.e., the second parameter to unfold)

        self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 1))
        self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 2))
        self.assertEqual(torch.tensor([0.5], device=device), x.unfold(0, 1, 1))

    def test_copy_all_dtypes_and_devices(self, device):
        from copy import copy
        for dt in torch.testing.get_all_dtypes():
            x = torch.tensor([1, 2, 3, 4], dtype=dt, device=device)
            x_clone = x.clone()
            y = copy(x)
            y.fill_(1)
            # copy is a shallow copy, only copies the tensor view,
            # not the data
            self.assertEqual(x, y)

    def test_clone_all_dtypes_and_devices(self, device):
        for dt in torch.testing.get_all_dtypes():
            x = torch.tensor((1, 1), dtype=dt, device=device)
            y = x.clone()
            self.assertEqual(x, y)

    def test_clone_zero_stride_dim(self, device):
        # stride zero, size 1 axis, not contiguous
        x = torch.randn(10)
        y = x.as_strided([2, 1, 5], [1, 0, 2])
        self.assertEqual(y, y.clone())

    @dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')))
    @dtypes(*set(torch.testing.get_all_math_dtypes('cpu')))
    def test_addcmul(self, device, dtype):
        # Returns floating or integral scalar corresponding to dtype
        def _number(floating, integer, dtype):
            if dtype in [torch.half, torch.float, torch.double, torch.bfloat16]:
                return floating
            elif dtype in [torch.cfloat, torch.cdouble]:
                return floating * (1 + 1j)
            else:
                return integer

        def rand_tensor(size, dtype, device):
            if dtype.is_floating_point or dtype.is_complex:
                return torch.rand(size=size, dtype=dtype, device=device)
            if dtype == torch.uint8:
                return torch.randint(1, 5, size=size, dtype=dtype, device=device)
            else:
                return torch.randint(-5, 5, size=size, dtype=dtype, device=device)

        a = rand_tensor((2, 2), dtype=dtype, device=device)
        b = rand_tensor((2, 2), dtype=dtype, device=device)
        c = rand_tensor((2, 2), dtype=dtype, device=device)

        alpha = _number(0.5, 3, dtype)

        actual = torch.addcmul(a, b, c, value=alpha)
        expected = a + alpha * b * c

        self.assertEqual(expected, actual)

        with self.assertWarnsOnceRegex(
                UserWarning, "This overload of addcmul is deprecated"):
            self.assertEqual(actual, torch.addcmul(a, alpha, b, c))

        if self.device_type == 'cuda' and dtype == torch.half:
            a = torch.tensor([60000.0], device=device, dtype=dtype)
            b = torch.tensor([60000.0], device=device, dtype=dtype)
            c = torch.tensor([2.0], device=device, dtype=dtype)
            out = torch.addcmul(a, b, c, value=-1)
            self.assertTrue(not (out.isnan() or out.isinf()))

    def test_narrow_empty(self, device):
        x = torch.randn(2, 3, 4, device=device)
        for d in range(x.dim()):
            y = x.narrow(d, x.size(d), 0)
            sz = list(x.size())
            sz[d] = 0
            self.assertEqual(sz, y.size())

    @dtypes(*torch.testing.get_all_dtypes())
    def test_index_copy(self, device, dtype):
        # We just test for num_copy <= num_dest, as otherwise there are repeated indices
        # and the behavior is undefined
        num_copy, num_dest = 3, 5

        def make_arg(batch_sizes, n, dim, contig):
            size_arg = batch_sizes[:dim] + (n,) + batch_sizes[dim:]
            return make_tensor(size_arg, device, dtype, low=None, high=None, noncontiguous=not contig)

        def ref_index_copy(tgt, dim, idx, src):
            for i in range(idx.size(0)):
                idx_dest = dim * (slice(None),) + (idx[i],)
                idx_src = dim * (slice(None),) + (i,)
                tgt[idx_dest] = src[idx_src]

        # More thorough testing as in index_add
        for dest_contig, src_contig, index_contig in product([True, False], repeat=3):
            for other_sizes in ((), (4, 5)):
                for dim in range(len(other_sizes)):
                    dest = make_arg(other_sizes, num_dest, dim, dest_contig)
                    src = make_arg(other_sizes, num_copy, dim, src_contig)
                    idx = torch.randperm(num_dest, dtype=torch.int64, device=device)[:num_copy]
                    if not index_contig:
                        idx = torch.repeat_interleave(idx, 2, dim=-1)
                        idx = idx[..., ::2]
                    dest2 = dest.clone()
                    dest.index_copy_(dim, idx, src)
                    ref_index_copy(dest2, dim, idx, src)
                    self.assertEqual(dest, dest2)
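    # A tiny hypothetical example (not part of the suite) of the semantics that
    # ref_index_copy above spells out with slices: along dim 0,
    # dest.index_copy_(0, idx, src) does dest[idx[i]] = src[i] for each i.
    #
    #   dest = torch.zeros(5)
    #   idx = torch.tensor([4, 0])
    #   src = torch.tensor([1., 2.])
    #   dest.index_copy_(0, idx, src)   # -> tensor([2., 0., 0., 0., 1.])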
    # onlyOnCPUAndCUDA due to an XLA error:
    # https://github.com/pytorch/pytorch/issues/53256
    @onlyOnCPUAndCUDA
    @dtypes(*torch.testing.get_all_dtypes())
    def test_index_copy_scalars(self, device, dtype):
        # Create the 8 possible combinations of scalar sizes for target / index / source
        scalars = ((make_tensor(size_t, dtype=dtype, device=device, low=None, high=None),
                    make_tensor(size_i, dtype=torch.int64, device=device, low=0, high=1),
                    make_tensor(size_s, dtype=dtype, device=device, low=None, high=None))
                   for size_t, size_i, size_s in product([(), (1,)], repeat=3))
        for target, idx, source in scalars:
            target.index_copy_(0, idx, source)
            self.assertEqual(target.item(), source.item())

    @onlyCPU
    def test_errors_index_copy(self, device):
        # We do not test the GPU as the CUDA_ASSERT would break the CUDA context
        idx_dim = 8
        tgt_dim = 5
        batch_dim = 3

        # Too large of an index
        a = torch.randn(batch_dim, tgt_dim, device=device)
        idx = torch.full((idx_dim,), tgt_dim, device=device)
        c = torch.zeros(batch_dim, idx_dim, device=device)
        with self.assertRaises(IndexError):
            a.index_copy_(1, idx, c)

        # Too small (negative indices)
        idx = torch.full((idx_dim,), -1, device=device)
        with self.assertRaises(IndexError):
            a.index_copy_(1, idx, c)

        # Too small (very negative indices) - they should be unsupported even
        # when support for negative indices is implemented for index_copy_
        idx = torch.full((idx_dim,), -tgt_dim - 1, device=device)
        with self.assertRaises(IndexError):
            a.index_copy_(1, idx, c)

    def _prepare_data_for_index_copy_and_add_deterministic(
            self, dim: int, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        assert (dim >= 0 and dim < 3)
        a = [5, 4, 3]
        a[dim] = 2000
        x = torch.zeros(a, device=device)
        b = a.copy()
        elems = a[dim] * 20
        b[dim] = elems
        src = torch.rand(b, device=device)
        index = torch.randint(a[dim], (elems,), device=device)
        return (x, index, src)

    @onlyOnCPUAndCUDA
    def test_index_copy_deterministic(self, device: torch.device) -> None:
        for dim in range(3):
            x, index, src = self._prepare_data_for_index_copy_and_add_deterministic(dim, device)
            with DeterministicGuard(True):
                y0 = torch.index_copy(x, dim, index, src)

            x0 = x.clone().detach()
            index_list = index.tolist()
            for i in range(len(index_list)):
                if dim == 0:
                    x0[index_list[i], :, :] = src[i, :, :]
                elif dim == 1:
                    x0[:, index_list[i], :] = src[:, i, :]
                elif dim == 2:
                    x0[:, :, index_list[i]] = src[:, :, i]

            self.assertEqual(x0, y0, atol=0, rtol=0)

    @onlyOnCPUAndCUDA
    def test_index_add_deterministic(self, device: torch.device) -> None:
        for dim in range(3):
            x, index, src = self._prepare_data_for_index_copy_and_add_deterministic(dim, device)
            alpha = random.random() + 1
            # on CPU it should be deterministic regardless of the deterministic mode
            with DeterministicGuard(True):
                y0 = torch.index_add(x, dim, index, src, alpha=alpha)
                for _ in range(3):
                    y = torch.index_add(x, dim, index, src, alpha=alpha)
                    self.assertEqual(y, y0, atol=0, rtol=0)

            with DeterministicGuard(False):
                for _ in range(3):
                    y_nd = torch.index_add(x, dim, index, src, alpha=alpha)
                    self.assertEqual(y_nd, y0, atol=1e-3, rtol=1e-5)

    @onlyOnCPUAndCUDA
    def test_index_put_non_accumulate_deterministic(self, device) -> None:
        with DeterministicGuard(True):
            for i in range(3):
                m = random.randint(10, 20)
                elems = random.randint(20000, 30000)
                values = torch.rand(elems, device=device)
                indices = torch.randint(m, (elems,), device=device)
                input = torch.rand(m, device=device)
                output = input.index_put((indices,), values, accumulate=False)

                input_list = input.tolist()
                indices_list = indices.tolist()
                values_list = values.tolist()
                for i, v in zip(indices_list, values_list):
                    input_list[i] = v

                self.assertEqual(output, input_list)

    @dtypes(*torch.testing.get_all_dtypes())
    def test_index_fill(self, device, dtype):
        x = torch.tensor([[1, 2], [4, 5]], dtype=dtype, device=device)
        index = torch.tensor([0], device=device)
        x.index_fill_(1, index, 0)
        self.assertEqual(x, torch.tensor([[0, 2], [0, 5]], dtype=dtype, device=device))
        if not x.is_complex():
            with self.assertRaisesRegex(RuntimeError, r"Scalar"):
                x.index_fill_(1, index, 1 + 1j)
        # Make sure that the result stays 0-dim while applied to
        # a 0-dim input
        x = torch.tensor(1, dtype=dtype, device=device)
        self.assertEqual(0, x.index_fill(0, index, -1).dim())
        self.assertEqual(0, x.index_fill_(0, index, -1).dim())

    # The test fails for zero-dimensional tensors on XLA
    @onlyOnCPUAndCUDA
    @dtypes(*torch.testing.get_all_dtypes())
    def test_index_select(self, device, dtype):
        num_src, num_out = 3, 5

        def make_arg(batch_sizes, n, dim, contig):
            size_arg = batch_sizes[:dim] + (n,) + batch_sizes[dim:]
            return make_tensor(size_arg, device, dtype, low=None, high=None, noncontiguous=not contig)

        def ref_index_select(src, dim, idx):
            # bfloat16 is just used on GPU, so it's not supported on numpy
            if dtype == torch.bfloat16:
                src = src.float()
            out = torch.from_numpy(np.take(src.cpu().numpy(), idx.cpu().numpy(), axis=dim))
            if dtype == torch.bfloat16:
                out = out.to(device=device, dtype=dtype)
            return out

        for src_contig, idx_contig in product([True, False], repeat=2):
            for other_sizes in ((), (4, 5)):
                for dim in range(len(other_sizes)):
                    src = make_arg(other_sizes, num_src, dim, src_contig)
                    idx = make_tensor((num_out,), device, dtype=torch.int64, low=0, high=num_src,
                                      noncontiguous=not idx_contig)
                    out = torch.index_select(src, dim, idx)
                    out2 = ref_index_select(src, dim, idx)
                    self.assertEqual(out, out2)

        for idx_type in (torch.int32, torch.int64):
            other_sizes = (3, 2)
            dim = 1
            src = make_arg(other_sizes, num_src, dim, True)
            idx = make_tensor((num_out,), device, dtype=idx_type, low=0, high=num_src, noncontiguous=False)
            out = torch.index_select(src, dim, idx)
            out2 = ref_index_select(src, dim, idx)
            self.assertEqual(out, out2)

        # Create the 4 possible combinations of scalar sizes for index / source
        scalars = ((make_tensor(size_s, device, dtype),
                    torch.zeros(size_i, dtype=torch.int64, device=device))
                   for size_s, size_i in product([(), (1,)], repeat=2))
        for source, idx in scalars:
            out = source.index_select(0, idx)
            self.assertEqual(out.item(), source.item())

    @dtypes(*torch.testing.get_all_dtypes())
    def test_take(self, device, dtype):
        idx_size = (4,)

        make_arg = partial(make_tensor, device=device, dtype=dtype)
        make_idx = partial(make_tensor, low=0, device=device, dtype=torch.int64)

        def ref_take(src, idx):
            if dtype == torch.bfloat16:
                src = src.half()
            src = src.cpu().numpy()
            idx = idx.cpu().numpy()
            out = torch.from_numpy(np.take(src, idx)).to(device=device, dtype=dtype)
            return out

        for src_contig, idx_contig, idx_reshape in product([True, False], repeat=3):
            for src_size in ((5,), (4, 5)):
                src = make_arg(src_size, noncontiguous=not src_contig)
                idx = make_idx(idx_size, high=src.numel(), noncontiguous=not idx_contig)
                if idx_reshape:
                    idx = idx.reshape(2, 2)
                out = torch.take(src, idx)
                out2 = ref_take(src, idx)
                self.assertEqual(out, out2)

        # Create the 4 possible combinations of scalar sizes for source / index
        for size_s, size_i in product([(), (1,)], repeat=2):
            source = make_arg(size_s)
            idx = make_idx(size_i, high=1)
            out = source.take(idx)
            self.assertEqual(out.item(), source.item())

    # The bool instance does not work on GPU. See
    # https://github.com/pytorch/pytorch/issues/54317
    @dtypes(*torch.testing.get_all_dtypes(include_bool=False))
    def test_put(self, device, dtype):
        src_size = (4,)

        make_arg = partial(make_tensor, device=device, dtype=dtype)
        make_idx = partial(make_tensor, low=0, device=device, dtype=torch.int64)

        def ref_put(dst, idx, src, accumulate):
            new_dst = dst.clone(memory_format=torch.contiguous_format).view(-1)
            new_idx = idx.contiguous().view(-1)
            new_src = src.contiguous().view(-1)
            method = new_dst.index_add_ if accumulate else new_dst.index_copy_
            return method(0, new_idx, new_src).view_as(dst)

        for dst_contig, src_contig, idx_contig, idx_reshape, accumulate in product([True, False], repeat=5):
            for dst_size in ((5,), (4, 5)):
                dst = make_arg(dst_size, noncontiguous=not dst_contig)
                src = make_arg(src_size, noncontiguous=not src_contig)

                # If accumulate=True, `put_` should be deterministic regardless of the inputs on CPU
                # On CUDA it may not be, but the test has enough tolerance to account for this
                if accumulate:
                    idx = make_idx(src_size, high=dst.numel())
                else:
                    idx = torch.randperm(dst.numel(), dtype=torch.int64, device=device)[:src_size[0]]
                if not idx_contig:
                    idx = torch.repeat_interleave(idx, 2, dim=-1)[..., ::2]
                if idx_reshape:
                    idx = idx.reshape(2, 2)
                # out-place
                out = torch.put(dst, idx, src, accumulate)
                reference = ref_put(dst, idx, src, accumulate)
                self.assertEqual(out, reference)

                # in-place
                dst.put_(idx, src, accumulate)
                self.assertEqual(dst, reference)

        # Create the 8 possible combinations of scalar sizes for target / index / source
        scalars = ((make_arg(size_t),
                    make_idx(size_i, high=1),
                    make_arg(size_s))
                   for size_t, size_i, size_s in product([(), (1,)], repeat=3))
        for (dest, idx, source), accumulate in product(scalars, [True, False]):
            dest_init = dest.clone()
            # out-place
            out = torch.put(dest, idx, source, accumulate=accumulate)
            # in-place
            dest1 = dest.clone()
            dest1.put_(idx, source, accumulate=accumulate)
            for d in [out, dest1]:
                if accumulate:
                    self.assertEqual(d.item(), (dest_init + source).item())
                else:
                    self.assertEqual(d.item(), source.item())

        # Empty case
        dest = make_arg((3, 2))
        reference = dest.clone()
        idx = make_idx((0,), high=1)
        source = make_arg((0,))
        for accumulate in [True, False]:
            out = torch.put(dest, idx, source, accumulate=accumulate)
            self.assertEqual(out, reference)
            dest.put_(idx, source, accumulate=accumulate)
            self.assertEqual(dest, reference)

    # The bool instance does not work on GPU. See
    # https://github.com/pytorch/pytorch/issues/54317
    @dtypes(*torch.testing.get_all_dtypes(include_bool=False))
    def test_put_accumulate(self, device, dtype):
        # Test for parallel adds with accumulate == True
        low_precision = dtype == torch.half or dtype == torch.bfloat16
        # Less numbers to avoid overflow with low_precision
        # Grainsize is 3000 for the for_loop to be parallelized on CPU
        sizes = ((100,),) if low_precision else ((200,), (3002,))
        # Bfloat16 has a particularly bad performance here
        # This operation is nondeterministic on GPU, so we are generous with the rtol
        rtol, atol = (1e-1, 1e-2) if low_precision else (1e-3, 1e-4)

        make_arg = partial(make_tensor, low=-2, high=3, device=device, dtype=dtype)
        # Dump everything into the 0-th position
        make_idx = partial(torch.zeros, device=device, dtype=torch.int64)
        args = ((make_idx(size), make_arg(size)) for size in sizes)

        for idx, source in args:
            orig = make_arg((1,))
            out = orig.put(idx, source, accumulate=True)
            self.assertEqual(out, orig + source.sum(), rtol=rtol, atol=atol)

    def test_take_empty(self, device):
        for input_shape in [(0,), (0, 1, 2, 0), (1, 2, 3)]:
            for indices_shape in [(0,), (0, 1, 2, 0)]:
                input = torch.empty(input_shape, device=device)
                indices = torch.empty(indices_shape, dtype=torch.int64, device=device)
                self.assertEqual(indices, torch.take(input, indices), exact_dtype=False)

    def test_put_empty(self, device):
        for dst_shape in [(0,), (0, 1, 2, 0), (1, 2, 3)]:
            for indices_shape in [(0,), (0, 1, 2, 0)]:
                for accumulate in [False, True]:
                    dst = torch.randn(dst_shape, device=device)
                    indices = torch.empty(indices_shape, dtype=torch.int64, device=device)
                    src = torch.randn(indices_shape, device=device)
                    self.assertEqual(dst, dst.put_(indices, src, accumulate=accumulate))

    def scatter_allow_reduce(self, device, dtype, reduceop):
        device_type = torch.device(device).type
        return device_type != 'cuda' or (reduceop == 'multiply' and dtype.is_floating_point)

    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
    # So, we are skipping it here.
    @dtypes(*(torch.testing.get_all_fp_dtypes(include_bfloat16=False, include_half=False) +
              torch.testing.get_all_complex_dtypes()))
    @dtypesIfCPU(*torch.testing.get_all_dtypes())
    @dtypesIfCUDA(*torch.testing.get_all_dtypes())
    def test_scatter_reduce_operations_to_large_input(self, device, dtype):
        index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
        test_data = [
            (torch.zeros(4, 4, device=device, dtype=dtype),
             torch.ones(2, 2, device=device, dtype=dtype),
             torch.tensor([[0, 0, 0, 0],
                           [1, 0, 0, 0],
                           [1, 0, 0, 0],
                           [0, 0, 0, 0]],
                          device=device, dtype=dtype), "add"),
            (torch.tensor([2], device=device, dtype=dtype).repeat(4, 4),
             torch.tensor([6], device=device, dtype=dtype).repeat(2, 2),
             torch.tensor([[2, 2, 2, 2],
                           [12, 2, 2, 2],
                           [12, 2, 2, 2],
                           [2, 2, 2, 2]], device=device, dtype=dtype), "multiply"),
        ]

        for input, src, result, operation in test_data:
            if not self.scatter_allow_reduce(device, dtype, operation):
                continue
            input.scatter_(0, index, src, reduce=operation)
            self.assertEqual(input, result)
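    # An illustrative sketch (my own example, not from the suite) of the
    # scatter-with-reduce semantics being verified above: along dim 0,
    # input.scatter_(0, index, src, reduce="add") performs
    # input[index[i][j]][j] += src[i][j] instead of overwriting.
    #
    #   inp = torch.zeros(3, 2)
    #   idx = torch.tensor([[1, 1]])
    #   src = torch.tensor([[5., 7.]])
    #   inp.scatter_(0, idx, src, reduce="add")
    #   # row 1 becomes [5., 7.]; all other entries stay 0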
    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
    # So, we are skipping it here.
    @dtypes(*(torch.testing.get_all_fp_dtypes(include_bfloat16=False, include_half=False) +
              torch.testing.get_all_complex_dtypes()))
    @dtypesIfCPU(*torch.testing.get_all_dtypes())
    @dtypesIfCUDA(*torch.testing.get_all_dtypes())
    def test_scatter_reduce_scalar(self, device, dtype):
        index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
        test_data = [
            (torch.zeros(4, 4, device=device, dtype=dtype), 1,
             torch.tensor([[0, 0, 0, 0],
                           [1, 0, 0, 0],
                           [1, 0, 0, 0],
                           [0, 0, 0, 0]],
                          device=device, dtype=dtype), "add"),
            (torch.tensor([2], device=device, dtype=dtype).repeat(4, 4), 2,
             torch.tensor([[2, 2, 2, 2],
                           [4, 2, 2, 2],
                           [4, 2, 2, 2],
                           [2, 2, 2, 2]], device=device, dtype=dtype), "multiply"),
        ]

        for input, src, result, operation in test_data:
            if not self.scatter_allow_reduce(device, dtype, operation):
                continue
            input.scatter_(0, index, src, reduce=operation)
            self.assertEqual(input, result)

    # TODO: remove this after scatter_add_ is deprecated.
    def test_scatter_add_non_unique_index(self, device):
        height = 2
        width = 65536
        input = torch.ones(height, width, device=device)
        index = torch.zeros(height, width, dtype=torch.long, device=device)
        src = torch.ones(height, width, device=device)
        input.scatter_add_(0, index, src)

        self.assertEqual(input,
                         torch.tensor([[3], [1]], device=device,
                                      dtype=torch.float32).repeat(1, width))

    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
    # So, we are skipping it here.
    @dtypes(*(torch.testing.get_all_fp_dtypes(include_bfloat16=False, include_half=False) +
              torch.testing.get_all_complex_dtypes()))
    @dtypesIfCPU(*torch.testing.get_all_dtypes())
    @dtypesIfCUDA(*torch.testing.get_all_dtypes())
    def test_scatter_reduce_non_unique_index(self, device, dtype):
        height = 2
        width = 2
        index = torch.zeros(height, width, dtype=torch.long, device=device)
        test_data = [
            (torch.ones(height, width, device=device, dtype=dtype),
             torch.ones(height, width, device=device, dtype=dtype),
             torch.tensor([[3], [1]], device=device, dtype=dtype).repeat(1, width), "add"),
            (torch.tensor([2], device=device, dtype=dtype).repeat(height, width),
             torch.tensor([2], device=device, dtype=dtype).repeat(height, width),
             torch.tensor([[8], [2]], device=device,
                          dtype=dtype).repeat(1, width), "multiply"),
        ]

        for input, src, result, operation in test_data:
            if not self.scatter_allow_reduce(device, dtype, operation):
                continue
            input.scatter_(0, index, src, reduce=operation)
            self.assertEqual(input, result, msg=f"result: {result} input: {input} method: {str(operation)}")
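    # A quick arithmetic note (mine, not the suite's) on the expected values in
    # the non-unique-index tests above: with index all zeros along dim 0, both
    # rows of src land on row 0, so for "add" row 0 holds
    # 1 (initial) + 1 + 1 = 3 and for "multiply" it holds 2 * 2 * 2 = 8,
    # while row 1 keeps its initial value.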
    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
    # So, we are skipping it here.
    @onlyCUDA
    @dtypesIfCUDA(*(torch.testing.get_all_complex_dtypes() +
                    torch.testing.get_all_int_dtypes()))
    def test_scatter_reduce_multiply_unsupported_dtypes(self, device, dtype):
        height = 2
        width = 2
        index = torch.zeros(height, width, dtype=torch.long, device=device)
        input = torch.ones(height, width, device=device, dtype=dtype)
        src = torch.ones(height, width, device=device, dtype=dtype)
        with self.assertRaises(RuntimeError):
            input.scatter_(0, index, src, reduce="multiply")

    def test_scatter_to_large_input(self, device):
        input = torch.zeros(4, 4, device=device)
        src = torch.ones(2, 2, device=device)
        index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
        input.scatter_(0, index, src)
        self.assertEqual(input, torch.tensor([[0, 0, 0, 0],
                                              [1, 0, 0, 0],
                                              [1, 0, 0, 0],
                                              [0, 0, 0, 0]], device=device, dtype=torch.float32))

    def test_scatter_add_to_large_input(self, device):
        input = torch.zeros(4, 4, device=device)
        src = torch.ones(2, 2, device=device)
        index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
        input.scatter_add_(0, index, src)
        self.assertEqual(input, torch.tensor([[0, 0, 0, 0],
                                              [1, 0, 0, 0],
                                              [1, 0, 0, 0],
                                              [0, 0, 0, 0]], device=device, dtype=torch.float32))

    def test_scatter_bool(self, device):
        x = torch.tensor([[True, True, True], [True, True, True]], device=device)
        res = torch.zeros(3, 3, dtype=torch.bool, device=device)
        res = res.scatter_(0, torch.tensor([[0, 1, 2], [0, 1, 2]], device=device), x)
        self.assertEqual(res, torch.tensor([[True, False, False],
                                            [False, True, False],
                                            [False, False, True]], device=device))

    def test_scatter_add_bool(self, device):
        x = torch.tensor([[True, True, True, True, True], [True, True, True, True, True]], device=device)
        res = torch.zeros(3, 5, dtype=torch.bool, device=device)
        res = res.scatter_add_(0, torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]], device=device), x)
        self.assertEqual(res, torch.tensor([[True, True, True, True, True],
                                            [False, True, False, True, False],
                                            [True, False, True, False, True]], device=device))

    @onlyOnCPUAndCUDA
    @dtypes(*torch.testing.get_all_dtypes())
    def test_masked_scatter(self, device, dtype):
        dt = dtype
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            for maskType in [torch.uint8, torch.bool]:
                num_copy, num_dest = 3, 10
                dest = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dt, device=device)
                dest2 = dest.clone()
                dest_ones = dest.clone()
                dest_ones_expected = dest.clone()
                src = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=dt, device=device)
                src_ones = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=dt, device=device)
                mask = torch.tensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0), dtype=maskType, device=device)

                if dt == torch.bool:
                    # torch.bool is a special case and is being tested
                    # in a separate test
                    return

                dest.masked_scatter_(mask, src)
                j = 0
                for i in range(num_dest):
                    if mask[i]:
                        dest2[i] = src[j]
                        dest_ones_expected[i] = src_ones[j]
                        j += 1
                self.assertEqual(dest, dest2, atol=0, rtol=0)

                dest_ones.masked_scatter_(mask, src_ones)
                self.assertEqual(dest_ones, dest_ones_expected, atol=0, rtol=0)

                # Bound checking in CUDA is done inside a kernel
                # in order to avoid synchronization, but this means
                # we can not clear the failures. So there is no way
                # to test it then recover.
                if self.device_type != 'cuda':
                    # make src smaller. this should fail
                    src = torch.zeros(num_copy - 1, dtype=dt, device=device)
                    with self.assertRaises(RuntimeError):
                        dest.masked_scatter_(mask, src)

                # empty tensor
                dest = torch.empty((5, 0, 5), dtype=dt, device=device)
                mask = torch.ones_like(dest, dtype=maskType, device=device)
                src = torch.empty((0,), dtype=dt, device=device)
                dest.masked_scatter_(mask, src)

                dest = torch.empty((5, 0, 5), dtype=dt, device=device)
                mask = torch.ones((5, 1, 5), dtype=maskType, device=device)
                src = torch.empty((0,), dtype=dt, device=device)
                dest.masked_scatter_(mask, src)

        if self.device_type != 'cuda':
            self.assertEqual(len(w), 5)
        else:
            self.assertEqual(len(w), 4)

        warn = 'masked_scatter_ received a mask with dtype torch.uint8,'
        for wi in w:
            self.assertEqual(str(wi.message)[0:55], str(warn))

    def test_masked_scatter_bool_tensor(self, device):
        src = torch.tensor([True, True, True], device=device)
        dst = torch.tensor([False, False, False], device=device)
        mask = torch.tensor([False, True, False], device=device)

        dst.masked_scatter_(mask, src)
        self.assertEqual(dst, torch.tensor([False, True, False], device=device))

        mask = torch.tensor([True, False, True], device=device)
        dst = dst.masked_scatter(mask, src)
        self.assertEqual(dst, torch.tensor([True, True, True], device=device))

    @onlyCUDA
    @largeTensorTest('30GB')
    def test_masked_scatter_large_tensor(self, device):
        t_cpu = torch.empty(2**31 + 1, dtype=torch.bool).random_()
        t = t_cpu.to(device)
        result_cpu = t_cpu.masked_scatter(t_cpu, t_cpu)
        result = t.masked_scatter(t, t)
        self.assertEqual(result, result_cpu)

    @dtypes(*torch.testing.get_all_dtypes())
    def test_masked_select(self, device, dtype):
        if device == 'cpu':
            warn = 'masked_select received a mask with dtype torch.uint8,'
        else:
            warn = 'indexing with dtype torch.uint8 is now deprecated, pl'
        for maskType in [torch.uint8, torch.bool]:
            num_src = 10
            src = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=dtype, device=device)
            mask = torch.randint(2, (num_src,), device=device, dtype=maskType)

            with warnings.catch_warnings(record=True) as w:
                dst = src.masked_select(mask)
                if maskType is torch.uint8:
                    self.assertEqual(len(w), 1)
                    self.assertEqual(str(w[0].message)[0:53], str(warn))
            dst2 = []
            for i in range(num_src):
                if mask[i]:
                    dst2 += [src[i]]
            self.assertEqual(dst, torch.tensor(dst2), atol=0, rtol=0)

            dst3 = torch.empty(0, device=device, dtype=dtype)
            torch.masked_select(src, mask, out=dst3)
            self.assertEqual(dst3, torch.tensor(dst2, dtype=dst3.dtype), atol=0, rtol=0)

        # Since half on CPU is not supported, need to skip the remaining test cases
        if dtype == torch.half and torch.device(device).type == 'cpu':
            return

        # Ensure that masks are expanded to match tensor properly
        a = torch.rand(100, 100, device=device).mul(100).to(dtype)
        mask_first_el_each_row = torch.zeros(100, device=device, dtype=torch.bool)
        mask_first_el_each_row[0] = True
        a_masked = a.masked_select(mask_first_el_each_row)
        self.assertEqual(a_masked, a[:, 0])

        mask_first_row = torch.zeros(100, 1, device=device, dtype=torch.bool)
        mask_first_row[0][0] = True
        a_masked = a.masked_select(mask_first_row)
        self.assertEqual(a_masked, a[0, :])

        # Ensure that tensor is expanded to match mask properly
        a = torch.rand(100, device=device).mul(100).to(dtype)
        mask_copy_3_times = torch.tensor([[True], [True], [False], [True]], device=device)
        a_masked = a.masked_select(mask_copy_3_times)
        self.assertEqual(a_masked, a.unsqueeze(0).expand(3, 100).flatten())

    def test_masked_select_discontiguous(self, device):
        for size in (10, 200):
            vals = torch.rand(size, size, device=device)
            mask = torch.full((size, size), False, dtype=torch.bool, device=device)
            mask[:, ::2] = True
            vals_list = (vals, vals.t())
            mask_list = (mask, mask.t())
            out_dc = torch.empty(size * size, device=device)[::2]
            for v, m in product(vals_list, mask_list):
                if m.is_contiguous():
                    expected = v[:, ::2].clone().view(-1)
                else:
                    expected = v[::2].clone().view(-1)
                out = torch.masked_select(v, m)
                self.assertEqual(out, expected, atol=0, rtol=0)
                torch.masked_select(v, m, out=out_dc)
                self.assertEqual(out_dc, expected, atol=0, rtol=0)

    @dtypes(*product(torch.testing.get_all_dtypes(), (torch.uint8, torch.bool)))
    def test_masked_fill(self, device, dtypes):
        dtype = dtypes[0]
        mask_dtype = dtypes[1]
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")

            num_dest = 10
            dst = torch.zeros(num_dest, dtype=dtype)
            mask = torch.randint(2, (num_dest,), dtype=mask_dtype)
            val = random.random()
            dst2 = dst.clone()

            dst.masked_fill_(mask, val)
            for i in range(num_dest):
                if mask[i]:
                    dst2[i] = val
            self.assertEqual(dst, dst2, atol=0, rtol=0)

            # test non-contiguous case
            dst = ((torch.randn(num_dest, num_dest, num_dest) * 10).to(dtype)).permute((2, 0, 1))
            dst2 = dst.contiguous()
            if dtype.is_complex:
                mask = dst.abs() > 0
            else:
                mask = dst > 0
            self.assertTrue(not dst.is_contiguous())
            self.assertTrue(dst2.is_contiguous())
            dst.masked_fill_(mask.to(mask_dtype), val)
            dst2.masked_fill_(mask.to(mask_dtype), val)
            self.assertEqual(dst, dst2, atol=0, rtol=0)

            if mask_dtype == torch.uint8:
                self.assertEqual(len(w), 3)

                warn = 'masked_fill_ received a mask with dtype torch.uint8,'
                for wi in w:
                    self.assertEqual(str(wi.message)[0:52], str(warn))
            else:
                self.assertEqual(len(w), 0)

    def test_masked_fill_bool_tensor(self, device):
        dst = torch.tensor([True, False, True], device=device)
        mask = torch.tensor([False, True, False], device=device)

        dst.masked_fill_(mask, True)
        self.assertEqual(dst, torch.tensor([True, True, True], device=device))

        dst = dst.masked_fill(mask, False)
        self.assertEqual(dst, torch.tensor([True, False, True], device=device))
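    # A small sketch (illustrative, not part of the suite) of the reference
    # semantics checked above: masked_select flattens in row-major order and
    # keeps the elements where the (broadcast) mask is True.
    #
    #   a = torch.tensor([[1, 2], [3, 4]])
    #   m = torch.tensor([[True, False], [False, True]])
    #   assert torch.equal(a.masked_select(m), torch.tensor([1, 4]))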
    def test_tensor_shape_empty(self, device):
        x = torch.randn((0, 1, 3, 0), device=device)
        # flatten
        self.assertEqual((0,), torch.flatten(x, 0, 3).shape)
        self.assertEqual((0, 0), torch.flatten(x, 0, 2).shape)
        self.assertEqual((0, 3, 0), torch.flatten(x, 1, 2).shape)

        # squeeze, unsqueeze
        self.assertEqual((0, 1, 1, 3, 0), torch.unsqueeze(x, 1).shape)
        self.assertEqual((0, 3, 0), torch.squeeze(x, 1).shape)
        self.assertEqual((0, 3, 0), torch.squeeze(x).shape)

        # transpose, t
        self.assertEqual((0, 0, 3, 1), torch.transpose(x, 1, 3).shape)
        y = torch.randn((5, 0), device=device)
        self.assertEqual((0, 5), y.t().shape)

        # select
        self.assertEqual((0, 1, 0), torch.select(x, 2, 2).shape)

        # repeat, permute
        self.assertEqual((9, 0, 5, 6, 0), x.repeat(9, 7, 5, 2, 3).shape)
        self.assertEqual((3, 0, 0, 1), x.permute(2, 3, 0, 1).shape)

        # diagonal, diagflat
        self.assertEqual((0,), torch.diagonal(torch.randn((5, 0), device=device)).shape)
        self.assertEqual((0,), torch.diagonal(torch.randn((0, 5), device=device)).shape)
        # off the end offsets are valid
        self.assertEqual((0,), torch.diagonal(torch.randn((5, 0), device=device), offset=1).shape)
        self.assertEqual((0,), torch.diagonal(torch.randn((0, 5), device=device), offset=1).shape)
        # check non-zero sized offsets off the end
        self.assertEqual((5, 6, 0), torch.diagonal(torch.randn((3, 4, 5, 6), device=device), offset=45252).shape)
        self.assertEqual((5, 6, 0), torch.diagonal(torch.randn((3, 4, 5, 6), device=device), offset=-45252).shape)

        self.assertEqual((0, 0), torch.diagflat(torch.tensor([], device=device)).shape)
        self.assertEqual(torch.zeros(1, 1), torch.diagflat(torch.tensor([], device=device), offset=1))
        self.assertEqual((0, 0), torch.diagflat(torch.tensor([[]], device=device)).shape)
        self.assertEqual(torch.zeros(1, 1), torch.diagflat(torch.tensor([[]], device=device), offset=1))

        # stack, split, chunk
        self.assertEqual((4, 0, 1, 3, 0), torch.stack((x, x, x, x)).shape)
        self.assertEqual([(0, 1, 3, 0)],
                         [z.shape for z in torch.chunk(x, 1, dim=0)])

        self.assertEqual([(0, 1, 3, 0), ] * 3, [z.shape for z in torch.chunk(x, 3, dim=0)])
        self.assertEqual([(0, 1, 1, 0), ] * 3, [z.shape for z in torch.chunk(x, 3, dim=2)])

        # NOTE: split_with_sizes behaves differently than NumPy in that it
        # takes sizes rather than offsets
        self.assertEqual([(0, 1, 0, 0), (0, 1, 1, 0), (0, 1, 2, 0)],
                         [z.shape for z in torch.split(x, (0, 1, 2), dim=2)])

        self.assertRaises(RuntimeError, lambda: torch.split(x, 0, dim=1))
        # This is strange because the split size is larger than the dim size, but consistent with
        # how split handles that case generally (when no 0s are involved).
        self.assertEqual([(0, 1, 3, 0)], [z.shape for z in torch.split(x, 1, dim=0)])
        self.assertEqual([(0, 1, 3, 0)], [z.shape for z in torch.split(x, 0, dim=0)])
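    # A short illustrative contrast (mine, not the suite's) for the NOTE above:
    # torch.split takes chunk *sizes*, while numpy.split takes cut *offsets*.
    #
    #   t = torch.arange(6)
    #   parts = torch.split(t, (1, 2, 3))                 # lengths 1, 2 and 3
    #   # numpy equivalent: np.split(t.numpy(), [1, 3])   # cut points, not sizes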
        self.assertRaises(RuntimeError, lambda: torch.split(x, 0, dim=1))
        # This is strange because the split size is larger than the dim size, but consistent with
        # how split handles that case generally (when no 0s are involved).
        self.assertEqual([(0, 1, 3, 0)], [z.shape for z in torch.split(x, 1, dim=0)])
        self.assertEqual([(0, 1, 3, 0)], [z.shape for z in torch.split(x, 0, dim=0)])

    # functions that operate over a dimension but don't reduce.
    def test_dim_function_empty(self, device):
        shape = (0, 1, 2, 0)
        x = torch.randn(shape, device=device)

        # size stride
        self.assertEqual(0, x.size(3))
        self.assertEqual(2, x.size(2))
        self.assertEqual(2, x.stride(0))
        self.assertEqual(1, x.stride(2))

        self.assertEqual(x, torch.nn.functional.glu(x, 0))
        self.assertEqual((0, 1, 1, 0), torch.nn.functional.glu(x, 2).shape)

        # softmax, logsoftmax
        self.assertEqual(x, torch.nn.functional.softmax(x, 0))
        self.assertEqual(x, torch.nn.functional.softmax(x, 2))
        self.assertEqual(x, torch.nn.functional.softmax(x, 3))

        self.assertEqual(x, torch.nn.functional.log_softmax(x, 0))
        self.assertEqual(x, torch.nn.functional.log_softmax(x, 2))
        self.assertEqual(x, torch.nn.functional.log_softmax(x, 3))

        # cumsum, cumprod, cummax, cummin
        self.assertEqual(shape, torch.cumsum(x, 0).shape)
        self.assertEqual(shape, torch.cumsum(x, 2).shape)
        self.assertEqual(shape, torch.cumprod(x, 0).shape)
        self.assertEqual(shape, torch.cumprod(x, 2).shape)
        self.assertEqual(shape, torch.cummax(x, 0)[0].shape)
        self.assertEqual(shape, torch.cummax(x, 2)[0].shape)
        self.assertEqual(shape, torch.cummin(x, 0)[0].shape)
        self.assertEqual(shape, torch.cummin(x, 2)[0].shape)
        self.assertEqual(shape, torch.logcumsumexp(x, 0).shape)
        self.assertEqual(shape, torch.logcumsumexp(x, 2).shape)

        # flip
        self.assertEqual(x, x.flip(0))
        self.assertEqual(x, x.flip(2))

        # roll
        self.assertEqual(x, x.roll(0, 1).roll(0, -1))
        self.assertEqual(x, x.roll(1, x.size(1)))
        self.assertEqual(x, x.roll(1))
        self.assertEqual(x, x.roll((1, 1), (3, 1)))

        # unbind
        self.assertEqual((), x.unbind(0))
        self.assertEqual((torch.empty((0, 1, 0), device=device), torch.empty((0, 1, 0), device=device)),
                         x.unbind(2))

        # cross
        y = torch.randn((0, 1, 3, 0), device=device)
        self.assertEqual(y.shape, torch.cross(y, y).shape)

        # renorm
        self.assertEqual(shape, torch.renorm(x, 1, 0, 5).shape)
        self.assertEqual(shape, torch.renorm(x, 1, 2, 5).shape)

        # sort
        self.assertEqual([shape, shape], [z.shape for z in torch.sort(x, dim=0)])
        self.assertEqual([shape, shape], [z.shape for z in torch.sort(x, dim=2)])

        # topk
        self.assertEqual([shape, shape], [z.shape for z in torch.topk(x, 0, dim=0)])
        self.assertEqual([(0, 1, 1, 0), (0, 1, 1, 0)], [z.shape for z in torch.topk(x, 1, dim=2)])

        y = torch.randn((2, 3, 4), device=device)
        self.assertEqual([(2, 3, 0), (2, 3, 0)], [z.shape for z in torch.topk(y, 0)])

        # gather
        self.assertEqual(shape, torch.gather(x, 0, torch.empty(shape, dtype=torch.int64, device=device)).shape)
        self.assertEqual(shape, torch.gather(x, 2, torch.empty(shape, dtype=torch.int64, device=device)).shape)
        larger_shape = torch.empty((0, 1, 3, 0), dtype=torch.int64, device=device)
        self.assertEqual(larger_shape.shape, torch.gather(x, 2, larger_shape).shape)
        smaller_shape = torch.empty((0, 1, 0, 0), dtype=torch.int64, device=device)
        self.assertEqual(smaller_shape.shape, torch.gather(x, 2, smaller_shape).shape)
        y = torch.randn((2, 3, 4), device=device)
        self.assertEqual((0, 3, 4),
                         torch.gather(y, 0, torch.empty((0, 3, 4), dtype=torch.int64, device=device)).shape)

        # scatter, scatter_add
        for dim in [0, 2]:
            y = torch.randn(shape, device=device)
            y_src = torch.randn(shape, device=device)
            ind = torch.empty(shape, dtype=torch.int64, device=device)
            self.assertEqual(shape, y.scatter_(dim, ind, y_src).shape)
            self.assertEqual(shape, y.scatter_add_(dim, ind, y_src).shape)

        z = torch.randn((2, 3, 4), device=device)
        z_src = torch.randn((2, 3, 4), device=device)
        self.assertEqual(z, z.scatter_(2, torch.empty((2, 3, 0), dtype=torch.int64, device=device), z_src))
        self.assertEqual(z, z.scatter_add_(2, torch.empty((2, 3, 0), dtype=torch.int64, device=device), z_src))

        # index_fill, index_copy, index_add
        c = x.clone()
        c_clone = c.clone()
        ind_empty = torch.tensor([], dtype=torch.int64, device=device)
        ind_01 = torch.tensor([0, 1], dtype=torch.int64, device=device)
        self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1))
        self.assertEqual(c_clone, c.index_fill_(2, ind_empty, -1))
        self.assertEqual(c_clone, c.index_fill_(2, torch.tensor([0, 1], dtype=torch.int64, device=device), -1))
        self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2, 0), device=device)))
        self.assertEqual(c_clone, c.index_copy_(2, ind_empty, torch.empty((0, 1, 0, 0), device=device)))
        self.assertEqual(c_clone, c.index_copy_(2, ind_01, torch.empty((0, 1, 2, 0), device=device)))
        self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2, 0), device=device)))
        self.assertEqual(c_clone, c.index_add_(2, ind_empty, torch.empty((0, 1, 0, 0), device=device)))
        self.assertEqual(c_clone, c.index_add_(2, ind_01, torch.empty((0, 1, 2, 0), device=device)))

        c = torch.randn((0, 1, 2), device=device)
        c_clone = c.clone()
        self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1))
        self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2), device=device)))
        self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2), device=device)))
        self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1))
        self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2), device=device)))
        self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2), device=device)))

        # index fill/copy/add non-empty
        z = torch.randn((2, 3, 4), device=device)
        self.assertEqual(z, z.index_fill_(0, ind_empty, -1))
        z = torch.randn((2, 3, 4), device=device)
        self.assertEqual(z, z.index_copy_(0, ind_empty, torch.empty((0, 3, 4), device=device)))
        z = torch.randn((2, 3, 4), device=device)
        self.assertEqual(z, z.index_add_(0, ind_empty, torch.empty((0, 3, 4), device=device)))

        # index_select
        self.assertEqual(x, x.index_select(0, ind_empty))
        self.assertEqual((0, 1, 0, 0), x.index_select(2, ind_empty).shape)
        self.assertEqual(x, x.index_select(2, ind_01))
        z = torch.randn((2, 3, 4), device=device)  # non-empty
        self.assertEqual((0, 3, 4), z.index_select(0, ind_empty).shape)
        c = torch.randn((0, 1, 2), device=device)
        self.assertEqual(c, c.index_select(0, ind_empty))
        c = torch.randn((0, 1, 2), device=device)
        self.assertEqual(c, c.index_select(0, ind_empty))

    def _brute_pdist(self, inp, p=2):
        """Computes the same as torch.pdist using primitives"""
        n = inp.shape[-2]
        k = n * (n - 1) // 2
        if k == 0:
            # torch complains about empty indices
            return torch.empty(inp.shape[:-2] + (0,), dtype=inp.dtype, device=inp.device)
        square = torch.norm(inp[..., None, :] - inp[..., None, :, :], p=p, dim=-1)
        unroll = square.view(square.shape[:-2] + (n * n,))
        inds = torch.ones(k, dtype=torch.int)
        inds[torch.arange(n - 1, 1, -1, dtype=torch.int).cumsum(0)] += torch.arange(2, n, dtype=torch.int)
        return unroll[..., inds.cumsum(0)]
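    # Informal sketch of the indexing trick in _brute_pdist above (a worked
    # example, not part of the test): for n = 4 the flattened n*n distance
    # matrix has upper-triangle offsets 1, 2, 3, 6, 7, 11. Starting from
    # inds = ones(6), bumping positions cumsum([3, 2]) = [3, 5] by [2, 3]
    # gives [1, 1, 1, 3, 1, 4], whose cumsum is exactly [1, 2, 3, 6, 7, 11].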
    def _pdist_single(self, shape, device, p, dtype, trans, grad_check=False):
        x = torch.randn(shape, dtype=dtype, device=device)
        if trans:
            x.transpose_(-2, -1)
        if grad_check:
            x.requires_grad_()
            y = x.detach().clone().requires_grad_()
        else:
            y = x
        actual = torch.pdist(x, p=p)
        expected = self._brute_pdist(y, p=p)
        self.assertEqual(expected.shape, actual.shape)
        self.assertEqual(expected, actual)
        if grad_check and expected.size() != torch.Size([0]):
            g0 = torch.rand_like(actual)
            actual.backward(g0)
            expected.backward(g0)
            self.assertEqual(x.grad, y.grad)

    @slowTest
    def test_pdist_norm_forward(self, device):
        for shape in [(4, 5), (3, 2), (2, 1), (1500, 1)]:
            for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                for trans in [False, True]:
                    for dtype in [torch.float32, torch.float64]:
                        self._pdist_single(shape, device, p, dtype, trans, grad_check=False)

        # do a simplified comparison with big inputs, see:
        # https://github.com/pytorch/pytorch/issues/15511
        for dtype in [torch.float32, torch.float64]:
            self._pdist_single((1000, 2), device, 2, dtype, trans=False, grad_check=False)

    @slowTest
    def test_pdist_norm_backward(self, device):
        for shape in [(4, 5), (3, 2), (2, 1), (1500, 1)]:
            for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                for trans in [False, True]:
                    self._pdist_single(shape, device, p, torch.float64, trans, grad_check=True)

    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "sandcastle OOM with current tpx gpu/re configuration")
    @skipIfRocm
    def test_pdist_norm_large(self, device):
        # use dim0>=46342 for forward, see:
        # https://github.com/pytorch/pytorch/issues/30583
        # Compare output using GPU with the CPU implementation, as brute_pdist uses too much memory
        if 'cuda' in device:
            x = torch.randn(50000, 1, dtype=torch.float32)
            expected_cpu = torch.pdist(x, p=2)
            actual_gpu = torch.pdist(x.to(device), p=2)
            self.assertEqual(expected_cpu, actual_gpu.cpu())

    @onlyOnCPUAndCUDA
    @dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')))
    @dtypes(*set(torch.testing.get_all_math_dtypes('cpu')))
    def test_addcdiv(self, device, dtype):
        # Returns floating or integral scalar corresponding to dtype
        def _number(floating, integer, dtype):
            if dtype in [torch.half, torch.float, torch.double, torch.bfloat16]:
                return floating
            elif dtype in [torch.cfloat, torch.cdouble]:
                return floating * (1 + 1j)
            else:
                return integer

        def non_zero_rand(size, dtype, device):
            if dtype.is_floating_point or dtype.is_complex:
                a = torch.rand(size=size, dtype=dtype, device=device)
            elif dtype == torch.uint8:
                a = torch.randint(1, 5, size=size, dtype=dtype, device=device)
            else:
                a = torch.randint(-5, 5, size=size, dtype=dtype, device=device)
            return a + (a == 0).to(dtype)

        def _test_addcdiv():
            a = non_zero_rand((2, 2), dtype=dtype, device=device)
            b = non_zero_rand((2, 2), dtype=dtype, device=device)
            c = non_zero_rand((2, 2), dtype=dtype, device=device)
            alpha = _number(0.5, 3, dtype)

            expected = a + (alpha * b) / c
            actual = torch.addcdiv(a, b, c, value=alpha)
            self.assertEqual(expected, actual)

            with self.assertWarnsOnceRegex(
                    UserWarning, "This overload of addcdiv is deprecated"):
                self.assertEqual(actual, torch.addcdiv(a, alpha, b, c))

        if not (dtype.is_floating_point or dtype.is_complex):
            # Integer division with addcdiv is prohibited
            with self.assertRaises(RuntimeError):
                _test_addcdiv()
        else:
            _test_addcdiv()

        if self.device_type == 'cuda' and dtype == torch.half:
            a = torch.tensor([60000.0], device=device, dtype=dtype)
            b = torch.tensor([60000.0], device=device, dtype=dtype)
            c = torch.tensor([1.0], device=device, dtype=dtype)
            out = torch.addcmul(a, b, c, value=-2)
            self.assertTrue(not (out.isnan() or out.isinf()))
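        # Why these magic numbers (an informal note, not from the original
        # comments): 60000 is close to the float16 max (~65504), so the
        # intermediate product value * b * c = -120000 would overflow to inf
        # if the kernel accumulated in half precision; the finiteness check
        # above implies a wider internal accumulation type.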
    def test_nullary_op_mem_overlap(self, device):
        ops = (
            ("random_", ()),
            ("uniform_", ()),
            ("cauchy_", ()),
            ("log_normal_", ()),
            ("exponential_", ()),
            ("geometric_", (0.5,)),
            ("normal_", ()),
        )

        x = torch.rand((1, 3)).expand((3, 3))
        for op, args in ops:
            with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
                getattr(x, op)(*args)

    @dtypes(torch.double)
    def test_ternary_op_mem_overlap(self, device, dtype):
        ops = [
            ("addcmul", True, True, 'cpu'),
            ("addcmul", True, True, 'cuda'),
            ("addcdiv", True, True, 'cpu'),
            ("addcdiv", True, True, 'cuda'),
            ("lerp", True, True, 'cpu'),
            ("lerp", True, True, 'cuda')
        ]

        for (fn, has_input_output_mem_overlap_check,
             has_internal_mem_overlap_check, dev) in ops:
            if dev != device:
                continue
            out_op = getattr(torch, fn)
            inplace_op = getattr(torch.Tensor, fn + '_')
            self.check_internal_mem_overlap(
                inplace_op, 3, dtype, device,
                expected_failure=not has_internal_mem_overlap_check)
            self.ternary_check_input_output_mem_overlap(out_op, dev,
                                                        expected_failure=not has_input_output_mem_overlap_check)

    @dtypes(torch.double)
    @onlyOnCPUAndCUDA
    def test_copy_mem_overlap(self, device, dtype):
        self.check_internal_mem_overlap(
            torch.Tensor.copy_, num_inputs=2, dtype=dtype, device=device)
        sz = 9
        doubles = torch.randn(2 * sz, dtype=dtype, device=device)
        self.unary_check_input_output_mem_overlap(
            doubles, sz, lambda input, out: out.copy_(input))

    @onlyOnCPUAndCUDA
    def test_index_add_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        y = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device)
        value = torch.rand((3,), device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.index_add_(0, ind, value)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.index_add_(0, ind, y[:3])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_add_(0, ind, ind.clone())
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_add_(0, ind.clone(), ind)

    @onlyOnCPUAndCUDA
    def test_index_copy_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        y = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device)
        value = torch.rand((3,), device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.index_copy_(0, ind, value)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.index_copy_(0, ind, y[:3])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_copy_(0, ind, ind.clone())
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_copy_(0, ind.clone(), ind)

    @onlyOnCPUAndCUDA
    def test_index_fill_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        y = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device)
        value = torch.rand((3,), device=device)

        with self.assertWarnsRegex(UserWarning, "index_fill_ on expanded tensors"):
            x.index_fill_(0, ind, 1.0)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_fill_(0, ind, 0)

    @onlyOnCPUAndCUDA
    def test_shift_mem_overlap(self, device):
        x = torch.rand(3, device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x[:-1] <<= x[1:]
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x[:-1] >>= x[1:]
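    # A note on the pattern used throughout these *_mem_overlap tests
    # (informal summary, not from the original comments): x[:-1] and x[1:]
    # are views of the same storage whose element ranges overlap, so an
    # in-place op that reads one while writing the other has no well-defined
    # result; the overlap check detects this and raises
    # 'unsupported operation'.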
    @onlyOnCPUAndCUDA
    def test_bernoulli_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))

        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.bernoulli_()
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.bernoulli_(p=0.1)
        p = torch.rand(6, device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.bernoulli_(p=p)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.bernoulli(torch.rand_like(x), out=x)

    @onlyOnCPUAndCUDA
    def test_put_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        y = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device)
        value = torch.rand((3,), device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.put_(ind, value)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.put_(ind[0], y[0])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.put_(ind, ind)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.put_(ind, y[:3])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.put_(ind, ind.clone())
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.put_(ind.clone(), ind)

    @onlyOnCPUAndCUDA
    def test_index_put_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        y = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device)
        value = torch.rand((3,), device=device)
        with self.assertWarnsRegex(UserWarning, 'expanded tensors'):
            x.index_put_((ind,), value)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.index_put_((ind,), y[0])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_put_((ind,), ind)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            y.index_put_((ind,), y[:3])
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_put_((ind,), ind.clone())
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.index_put_((ind.clone(),), ind)

    @onlyOnCPUAndCUDA
    def test_masked_fill_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        mask = torch.tensor([True, False, True, True, False, False], device=device)
        with self.assertWarnsRegex(UserWarning, 'expanded tensors'):
            x.masked_fill_(mask, 0.)

        fill_val = torch.tensor(0., device=device)
        with self.assertWarnsRegex(UserWarning, 'expanded tensors'):
            x.masked_fill_(mask, fill_val)

        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            mask[1:].masked_fill_(mask[:-1], False)

    @onlyOnCPUAndCUDA
    def test_masked_select_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((3,))
        y = torch.rand((6,), device=device)
        mask = torch.tensor([True, False, True, True, False, False], device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.masked_select(y, mask, out=x)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.masked_select(y, mask, out=y)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.masked_select(mask.clone(), mask, out=mask)

    @onlyOnCPUAndCUDA
    def test_masked_scatter_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        src = torch.rand((3,), device=device)
        mask = torch.tensor([True, False, True, True, False, False], device=device)

        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.masked_scatter_(mask, src)

    @onlyOnCPUAndCUDA
    def test_index_select_mem_overlap(self, device):
        x = torch.rand((1, 6), device=device).expand((2, 6))
        y = torch.rand((3, 6), device=device)
        ind = torch.tensor([0, 1], dtype=torch.int64, device=device)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.index_select(y, 1, ind, out=x)

    @onlyOnCPUAndCUDA
    def test_scatter_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((6,))
        src = torch.rand((3,), device=device)
        ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)

        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            x.scatter_(0, ind, src)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            src.scatter_(0, ind, src)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            ind.scatter_(0, ind, ind.clone())

    @onlyOnCPUAndCUDA
    def test_gather_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((3,))
        src = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.gather(src, 0, ind, out=x)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.gather(src, 0, ind, out=src)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.gather(ind.clone(), 0, ind[1:], out=ind[:1])

    @onlyOnCPUAndCUDA
    def test_take_mem_overlap(self, device):
        x = torch.rand((1,), device=device).expand((3,))
        src = torch.rand((6,), device=device)
        ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.take(src, ind, out=x)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.take(src, ind, out=src)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.take(ind.clone(), ind[1:], out=ind[:-1])

    @onlyCUDA
    def test_multinomial_device_constrain(self, device):
        x = torch.empty(0, device="cpu")
        y = torch.empty(0, device=device)
        self.assertRaisesRegex(
            RuntimeError, "Expected all tensors to be on the same device",
            lambda: torch.multinomial(x, 2, out=y))
    @deviceCountAtLeast(2)
    @onlyCUDA
    def test_multinomial_gpu_device_constrain(self, devices):
        x = torch.empty(0, device=devices[0])
        y = torch.empty(0, device=devices[1])
        self.assertRaisesRegex(
            RuntimeError, "Expected all tensors to be on the same device",
            lambda: torch.multinomial(x, 2, out=y))

    @deviceCountAtLeast(2)
    @onlyCUDA
    def test_device_guard(self, devices):
        # verify that all operators with `device_guard: False` behave properly with multiple devices.
        # TODO: if we had operator introspection we could figure out this set of operators automatically...
        x = torch.randn((1, 2, 3), device=devices[1])
        y = torch.zeros((1, 3, 2), device=devices[1])
        scalar = torch.tensor(5, device=devices[1])

        # property ops
        torch.cudnn_is_acceptable(x)
        x.is_distributed()
        x.is_floating_point()
        x.is_complex()
        x.is_same_size(y)
        x.is_signed()
        x.size(0)
        x.stride(0)
        x.numel()
        x.is_set_to(y)
        x.data_ptr()
        scalar.is_nonzero()

        # sparse property ops
        y[0][1] = 5
        y_sparse = y.to_sparse()
        y_sparse.sparse_dim()
        y_sparse._dimI()
        y_sparse.dense_dim()
        y_sparse._dimV()
        y_sparse._nnz()
        y_sparse.is_coalesced()
        y_sparse._indices()
        y_sparse._values()
        y_sparse.indices()
        y_sparse.values()

        # in-place ops
        def inplace():
            return torch.randn((1, 2, 3), device=devices[1])
        inplace().as_strided_(y.size(), y.stride())
        inplace().resize_(y.size())
        inplace().squeeze_()
        inplace().squeeze_(0)
        inplace().unsqueeze_(2)
        inplace().transpose_(1, 2)
        inplace().squeeze_().t_()
        inplace().set_(x.storage())
        inplace().set_(x.storage(), x.storage_offset(), x.size(), x.stride())
        inplace().set_(x)
        inplace().set_()
        y_sparse._coalesced_(True)

        # shape modification
        x.as_strided(y.size(), y.stride())
        x.expand((5, 2, 3))
        x.expand_as(x)
        x.sum_to_size((1,))
        torch.broadcast_tensors(x, x)
        x.reshape((1, 3, 2))
        x.reshape_as(y)
        x.squeeze()
        x.squeeze(0)
        x.squeeze().t()
        x.transpose(1, 2)
        x.unsqueeze(2)
        x.view((1, 3, 2))
        x.view_as(y)

        # chunk, split, etc.
        x.chunk(2, dim=1)
        x.split(1, dim=2)
        x.split_with_sizes([1, 2], dim=2)
        x.unfold(dimension=2, size=1, step=1)

        x.narrow(1, 1, 1)
        x.select(1, 1)
        torch.isnan(x)

        torch.empty((1, 3, 2), out=y)
        torch.empty_like(x)
        torch.empty_like(x, dtype=torch.int64)

        # to
        x.to(x)
        x.to(y)
        x.to(x, copy=True)
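    # Informal background for the test above (my gloss, not from the source):
    # operators registered with `device_guard: False` skip the automatic CUDA
    # device guard, so they must not touch device memory; calling them while
    # the *current* device (devices[0]) differs from the tensors' device
    # (devices[1]) would misbehave if any of them actually did.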
    def test_is_signed(self, device):
        self.assertEqual(torch.IntTensor(5).to(device).is_signed(), True)
        self.assertEqual(torch.ByteTensor(5).to(device).is_signed(), False)
        self.assertEqual(torch.CharTensor(5).to(device).is_signed(), True)
        self.assertEqual(torch.FloatTensor(5).to(device).is_signed(), True)
        self.assertEqual(torch.HalfTensor(10).to(device).is_signed(), True)

    # Note - reports a leak of 512 bytes on CUDA device 1
    @deviceCountAtLeast(2)
    @skipCUDAMemoryLeakCheckIf(True)
    @onlyCUDA
    def test_tensor_set_errors_multigpu(self, devices):
        f_cuda0 = torch.randn((2, 3), dtype=torch.float32, device=devices[0])
        f_cuda1 = torch.randn((2, 3), dtype=torch.float32, device=devices[1])

        self.assertRaises(RuntimeError, lambda: f_cuda0.set_(f_cuda1.storage()))
        self.assertRaises(RuntimeError,
                          lambda: f_cuda0.set_(f_cuda1.storage(), 0, f_cuda1.size(), f_cuda1.stride()))
        self.assertRaises(RuntimeError, lambda: f_cuda0.set_(f_cuda1))

    @onlyCUDA
    def test_half_tensor(self, device):
        x = torch.randn(5, 5).half()
        self.assertEqual(x.to(device), x)

        xc = x.to(device)
        with tempfile.NamedTemporaryFile() as f:
            torch.save(xc, f)
            f.seek(0)
            xc2 = torch.load(f)
            self.assertIsInstance(xc2, type(xc))
            self.assertEqual(xc.float(), xc2.float())

    @onlyCUDA
    @deviceCountAtLeast(1)  # Note: Test works with one device but prefers more
    def test_serialization(self, devices):
        def _test_serialization(filecontext_lambda):
            t0 = torch.cuda.FloatTensor(5).fill_(1)
            with torch.cuda.device(devices[-1]):
                tn = torch.cuda.FloatTensor(3).fill_(2)
            torch.cuda.set_device(devices[0])
            b = (t0, tn)
            with filecontext_lambda() as f:
                torch.save(b, f)
                f.seek(0)
                c = torch.load(f)
                self.assertEqual(b, c, atol=0, rtol=0)
                u0, un = c
                self.assertEqual(str(u0.device), devices[0])
                self.assertEqual(str(un.device), devices[-1])

        _test_serialization(tempfile.NamedTemporaryFile)
        _test_serialization(BytesIOContext)

    def test_memory_format_preserved_after_permute(self, device):
        x = torch.randn(4, 3, 8, 8, device=device)
        nhwc = x.contiguous(memory_format=torch.channels_last)
        y = nhwc.permute(0, 1, 3, 2).permute(0, 1, 3, 2)
        self.assertTrue(y.is_contiguous(memory_format=torch.channels_last))

        x = torch.randn(4, 3, 8, 8, 8, device=device)
        ndhwc = x.contiguous(memory_format=torch.channels_last_3d)
        y = ndhwc.permute(0, 1, 4, 3, 2).permute(0, 1, 4, 3, 2)
        self.assertTrue(y.is_contiguous(memory_format=torch.channels_last_3d))

    def test_memory_format_propagation_rules(self, device):
        contiguous = torch.rand(10, 3, 5, 5, device=device)
        cl = torch.rand(10, 3, 5, 5, device=device).contiguous(memory_format=torch.channels_last)
        ambiguous = torch.rand(10, 3, 1, 1, device=device).contiguous(memory_format=torch.channels_last)
        self.assertTrue(ambiguous.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ambiguous.is_contiguous(memory_format=torch.contiguous_format))
        bias = torch.rand(1, 1, 1, 1, device=device).contiguous(memory_format=torch.channels_last)

        def _test_propagation_rules(self, contiguous, cl, ambiguous, bias):
            options = ((ambiguous, contiguous, torch.contiguous_format),
                       (ambiguous, cl, torch.channels_last),
                       (contiguous, ambiguous, torch.contiguous_format),
                       (contiguous, cl, torch.contiguous_format),
                       (cl, ambiguous, torch.channels_last),
                       (cl, contiguous, torch.channels_last),
                       (bias, cl, torch.channels_last),
                       (cl, bias, torch.channels_last),)

            for a, b, mf in options:
                result = a + b
                self.assertTrue(result.is_contiguous(memory_format=mf))

        _test_propagation_rules(self, contiguous, cl, ambiguous, bias)

        cl = cl.to(memory_format=torch.channels_last)
        ambiguous = ambiguous.to(memory_format=torch.channels_last)
        bias = bias.to(memory_format=torch.channels_last)

        _test_propagation_rules(self, contiguous, cl, ambiguous, bias)

        # test cases when strides matter in ambiguous tensors
        for mf in (torch.channels_last, torch.contiguous_format):
            ambiguous = torch.rand(10, 3, 1, 1, device=device).to(memory_format=mf)
            bias = torch.rand(3, 1, 1, device=device)
            result = ambiguous + bias
            self.assertEqual(ambiguous.stride(), result.stride())
            result = bias + ambiguous
            self.assertEqual(ambiguous.stride(), result.stride())
            result = ambiguous * 5
            self.assertEqual(ambiguous.stride(), result.stride())

    def test_memory_format_empty_like(self, device):
        def test_helper(x, memory_format):
            xc = x.contiguous(memory_format=memory_format)

            like = torch.empty_like(xc, memory_format=torch.preserve_format)
            self.assertFalse(like.is_contiguous())
            self.assertTrue(like.is_contiguous(memory_format=memory_format))

            like_x = torch.empty_like(x, memory_format=torch.preserve_format)
            self.assertTrue(like_x.is_contiguous())
            self.assertFalse(like_x.is_contiguous(memory_format=memory_format))

            like = torch.empty_like(x, memory_format=memory_format)
            self.assertFalse(like.is_contiguous())
            self.assertTrue(like.is_contiguous(memory_format=memory_format))

            like = torch.empty_like(xc, memory_format=torch.contiguous_format)
            self.assertTrue(like.is_contiguous())
            self.assertFalse(like.is_contiguous(memory_format=memory_format))

            like = torch.empty_like(xc)
            self.assertFalse(like.is_contiguous())
            self.assertTrue(like.is_contiguous(memory_format=memory_format))

            sparse = x.to_sparse()
            with self.assertRaises(RuntimeError):
                z = torch.empty_like(sparse, memory_format=torch.preserve_format)
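            # (presumably because sparse tensors have no dense strided layout
            # to preserve -- my gloss, not an original comment)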
        test_helper(torch.randn(4, 3, 8, 8, device=device), torch.channels_last)
        test_helper(torch.randn(4, 3, 8, 8, 8, device=device), torch.channels_last_3d)

    def test_memory_format_consistency(self, device):
        x = torch.randn(10, 3, 1, 1, device=device)
        x_rep = x.as_strided(x.size(), x.stride())
        self.assertEqual(x.size(), x_rep.size())
        self.assertEqual(x.stride(), x_rep.stride())
        self.assertEqual(x.is_contiguous(), x_rep.is_contiguous())
        self.assertEqual(x.is_contiguous(memory_format=torch.channels_last),
                         x_rep.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(
            x.is_contiguous(memory_format=torch.channels_last_3d),
            x_rep.is_contiguous(memory_format=torch.channels_last_3d))

    def test_memory_format_operators(self, device):
        def _chunk_op(x, y):
            x1, x2 = x.chunk(2, dim=1)
            return x1 + x2

        def _unsqueeze_op_add(x, y):
            return x[0].unsqueeze(0) + 3

        def _unsqueeze_op_clone(x, y):
            return x[0].unsqueeze(0).clone()

        def _test_helper(x, y, bias, memory_format):
            return_contig_fns = [
                lambda x, y: y + x,
                lambda x, y: y * x,
                lambda x, y: y.addcdiv(x, y, value=2),
                lambda x, y: y.addcmul(x, y, value=2),
            ]
            bias_fns = [
                lambda x, b: x + b,
                lambda x, b: b + x,
            ]
            fns = [
                lambda x, y: x.clone(),
                lambda x, y: x + 3,
                lambda x, y: 3 * x,
                lambda x, y: x + y,
                lambda x, y: x * y,
                lambda x, y: abs(x),
                lambda x, y: x.abs(),
                lambda x, y: x.abs_(),
                lambda x, y: x.acos(),
                lambda x, y: x.acos_(),
                lambda x, y: x.add(y, alpha=3),
                lambda x, y: x.add_(y, alpha=3),
                lambda x, y: x.addcdiv(y, y, value=2),
                lambda x, y: x.addcdiv_(y, y, value=2),
                lambda x, y: x.addcmul(y, y, value=2),
                lambda x, y: x.addcmul_(y, y, value=2),
                lambda x, y: x.acosh(),
                lambda x, y: x.acosh_(),
                lambda x, y: x.asinh(),
                lambda x, y: x.asinh_(),
                lambda x, y: x.atanh(),
                lambda x, y: x.atanh_(),
                lambda x, y: x.asin(),
                lambda x, y: x.asin_(),
                lambda x, y: x.atan(),
                lambda x, y: x.atan2(y),
                lambda x, y: x.atan2_(y),
                lambda x, y: x.ceil(),
                lambda x, y: x.ceil_(),
                lambda x, y: x.clamp(-1, 1),
                lambda x, y: x.cos(),
                lambda x, y: x.cosh(),
                lambda x, y: x.div(0.5),
                lambda x, y: x.div_(0.5),
                lambda x, y: x.div(y),
                lambda x, y: x.div_(y),
                lambda x, y: x.digamma(),
                lambda x, y: x.digamma_(),
                lambda x, y: x.erf(),
                lambda x, y: x.erfc(),
                lambda x, y: x.erfinv(),
                lambda x, y: x.erfinv_(),
                lambda x, y: x.exp(),
                lambda x, y: x.expm1(),
                lambda x, y: x.expm1_(),
                lambda x, y: x.floor(),
                lambda x, y: x.floor_(),
                lambda x, y: x.fmod(2),
                lambda x, y: x.frac(),
                lambda x, y: x.hypot(y),
                lambda x, y: x.hypot_(y),
                lambda x, y: x.i0(),
                lambda x, y: x.i0_(),
                lambda x, y: x.lerp(y, 0.5),
                lambda x, y: x.log(),
                lambda x, y: x.log_(),
                lambda x, y: x.log10(),
                lambda x, y: x.log10_(),
                lambda x, y: x.log1p(),
                lambda x, y: x.log1p_(),
                lambda x, y: x.log2(),
                lambda x, y: x.log2_(),
                lambda x, y: x.mul(3),
                lambda x, y: x.mul_(3),
                lambda x, y: x.neg(),
                lambda x, y: x.neg_(),
                lambda x, y: x.pow(3),
                lambda x, y: x.pow_(3),
                lambda x, y: x.pow(0.0),
                lambda x, y: x.pow(1.0),
                lambda x, y: x.reciprocal(),
                lambda x, y: x.remainder(2),
                lambda x, y: x.round(),
                lambda x, y: x.round_(),
                lambda x, y: x.rsqrt(),
                lambda x, y: x.rsqrt_(),
                lambda x, y: x.sigmoid(),
                lambda x, y: x.sigmoid_(),
                lambda x, y: x.logit(),
                lambda x, y: x.logit_(),
                lambda x, y: x.logit(1e-6),
                lambda x, y: x.logit_(1e-6),
                lambda x, y: x.sign(),
                lambda x, y: x.sign_(),
                lambda x, y: x.sgn(),
                lambda x, y: x.sgn_(),
                lambda x, y: x.sin(),
                lambda x, y: x.sin_(),
                lambda x, y: x.sinh(),
                lambda x, y: x.sinh_(),
                lambda x, y: x.sqrt(),
                lambda x, y: x.sqrt_(),
                lambda x, y: x.tan(),
                lambda x, y: x.tanh(),
                lambda x, y: x.trunc(),
                lambda x, y: x.trunc_(),
                _chunk_op,
                _unsqueeze_op_add,
                _unsqueeze_op_clone,
            ]

            for fn in fns:
                x_c = x.contiguous()
                y_c = y.contiguous()
                result_c = fn(x_c, y_c)
                result = fn(x, y)
                self.assertEqual(result, result_c)
                self.assertTrue(
                    result.is_contiguous(memory_format=memory_format),
                    "result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), memory_format))

            for fn in bias_fns:
                x_c = x.contiguous()
                b_c = bias.contiguous()
                result_c = fn(x_c, b_c)
                result = fn(x, bias)
                self.assertEqual(result, result_c)
                self.assertTrue(
                    result.is_contiguous(memory_format=memory_format),
                    "result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), memory_format))

            for fn in return_contig_fns:
                x_c = x.contiguous()
                y_c = y.contiguous()
                result_c = fn(x_c, y_c)
                result = fn(x, y)
                self.assertEqual(result, result_c)
                self.assertTrue(
                    result.is_contiguous(memory_format=torch.contiguous_format),
                    "result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(),
                                                                      torch.contiguous_format))

        _test_helper(
            torch.randn((4, 3, 8, 8), device=device).contiguous(memory_format=torch.channels_last),
            abs(torch.randn((4, 3, 8, 8), device=device)) + 1,
            torch.randn((1, 3, 1, 1), device=device).contiguous(memory_format=torch.channels_last),
            torch.channels_last)
        _test_helper(
            torch.randn((4, 3, 8, 8, 8), device=device).contiguous(memory_format=torch.channels_last_3d),
            abs(torch.randn((4, 3, 8, 8, 8), device=device)) + 1,
            torch.randn((1, 3, 1, 1, 1), device=device).contiguous(memory_format=torch.channels_last_3d),
            torch.channels_last_3d)

    def test_strides_propagation(self, device):
        def _test_helper(x, op, unary=False):
            def compare_strides(s1, s2, div):
                sdiv = [s // div for s in s1]
                self.assertEqual(sdiv, s2)

            dim = x.dim()
            # we produce memory dense outputs, so when input is strided on the last dimension
            # we need to divide by that dimension stride to compare input and result strides
            div = x.stride(-1)
            for p in permutations(range(dim)):
                xp = x.permute(p)
                if not unary:
                    y = torch.randn(xp.size(-1), device=x.device, dtype=x.dtype)
                    for inputs in ((xp, xp), (xp, y), (y, xp)):
                        res = op(*inputs)
                        compare_strides(xp.stride(), res.stride(), div)
                        self.assertEqual(xp.size(), res.size())
                        out = torch.empty(0, device=xp.device, dtype=res.dtype)
                        res = op(*inputs, out=out)
                        compare_strides(xp.stride(), res.stride(), div)
                        self.assertEqual(xp.size(), res.size())
                else:
                    res = op(xp)
                    compare_strides(xp.stride(), res.stride(), div)
                    self.assertEqual(xp.size(), res.size())
                    out = torch.empty(0, device=xp.device, dtype=res.dtype)
                    res = op(xp, out=out)
                    compare_strides(xp.stride(), res.stride(), div)
                    self.assertEqual(xp.size(), res.size())

        # torch.eq by default calls TensorIterator with defined output, torch.add with undefined
        binary_ops = (torch.eq, torch.add)
        unary_ops = (torch.exp,)
        # memory dense, sliced and ambiguous sliced (ambiguous dense loses permutation information)
        xs = (torch.randn(2, 3, 4, device=device),
              torch.randn(2, 3, 8, device=device)[:, :, ::2],
              torch.randn(1, 1, 4, 12, device=device)[:, :, :, ::2])
        for op in binary_ops:
            for x in xs:
                _test_helper(x, op)
        for op in unary_ops:
            for x in xs:
                _test_helper(x, op, unary=True)

    @skipMeta
    @dtypes(*torch.testing.get_all_dtypes())
    def test_dlpack_conversion(self, device, dtype):
        # DLPack does not explicitly support bool; it goes through uint8 instead.
        if dtype is torch.bool:
            return
        x = make_tensor((5,), device, dtype, low=-9, high=9)
        z = from_dlpack(to_dlpack(x))
        self.assertEqual(z, x)
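    # Hedged aside (my paraphrase of the comment above): since the DLPack
    # spec has no bool dtype, bool data would have to round-trip as uint8,
    # which is why the bool case is skipped rather than asserted on here.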
    @onlyCUDA
    @unittest.skipIf(PYTORCH_CUDA_MEMCHECK, "is_pinned uses failure to detect pointer property")
    def test_pin_memory_from_constructor(self, device):
        def _get_like(t, **kwargs):
            return [
                torch.rand_like(t, **kwargs),
                torch.randn_like(t, **kwargs),
                torch.empty_like(t, **kwargs),
                torch.full_like(t, 4, **kwargs),
                torch.zeros_like(t, **kwargs),
                torch.ones_like(t, **kwargs),
            ]

        def _get_tensors(**kwargs):
            return [
                torch.tensor([10, 11], **kwargs),
                torch.randn(3, 5, **kwargs),
                torch.rand(3, **kwargs),
                # torch.randint(3, 5, **kwargs),  # unsupported
                torch.zeros(3, **kwargs),
                torch.randperm(3, **kwargs),
                torch.empty(6, **kwargs),
                torch.ones(6, **kwargs),
                torch.eye(6, **kwargs),
                torch.arange(3, 5, **kwargs)]

        pinned_tensors = _get_tensors(pin_memory=True) + _get_like(torch.empty(5, dtype=torch.float64), pin_memory=True)
        for x in pinned_tensors:
            self.assertTrue(x.is_pinned())

        tensors = _get_tensors() + _get_like(torch.empty(5, dtype=torch.float64, pin_memory=True))
        for x in tensors:
            self.assertFalse(x.is_pinned())

    def test_storage_device(self, device):
        x = torch.tensor([], device=device)
        self.assertEqual(x.dtype, x.storage().dtype)

    @deviceCountAtLeast(2)
    @onlyCUDA
    def test_storage_multigpu(self, devices):
        for device in devices:
            x = torch.tensor([], device=device)
            self.assertEqual(x.dtype, x.storage().dtype)

    @dtypesIfCUDA(torch.float, torch.double, torch.half)
    @dtypes(torch.float, torch.double)
    def test_multinomial(self, device, dtype):
        def make_prob_dist(shape, is_contiguous):
            if is_contiguous:
                if dtype == torch.half:
                    return torch.zeros(shape, device=device).uniform_().to(dtype=torch.half)
                return torch.zeros(shape, device=device, dtype=dtype).uniform_()
            elif len(shape) == 1:
                if dtype == torch.half:
                    return torch.zeros((shape + [5]), device=device).uniform_().to(dtype=torch.half)[:, 2]
                return torch.zeros((shape + [5]), device=device, dtype=dtype).uniform_()[:, 2]
            else:
                # num dim = 2
                new_shape = [2, shape[1], 7, 1, shape[0], 1, 10]
                if dtype == torch.half:
                    prob_dist = torch.zeros(new_shape, device=device).uniform_().to(dtype=torch.half)
                else:
                    prob_dist = torch.zeros(new_shape, device=device, dtype=dtype).uniform_()
                prob_dist = prob_dist.transpose(1, 4)
                prob_dist = prob_dist[1, :, 5, 0, :, 0, 4]
                assert not prob_dist.is_contiguous()  # sanity check
                return prob_dist

        for is_contiguous in (True, False):
            # with replacement
            n_row = 3
            for n_col in range(4, 5 + 1):
                prob_dist = make_prob_dist([n_row, n_col], is_contiguous)
                # indices that shouldn't be sampled (<0 means none)
                zero_prob_indices = torch.LongTensor(n_row).random_(-2, n_col).tolist()
                for i, j in enumerate(zero_prob_indices):
                    if j >= 0:
                        prob_dist[i, j] = 0
                n_sample = n_col * 3
                sample_indices = torch.multinomial(prob_dist, n_sample, True)
                self.assertEqual(prob_dist.dim(), 2)
                self.assertEqual(sample_indices.size(1), n_sample)
                for i in range(n_row):
                    zero_prob_idx = zero_prob_indices[i]
                    if zero_prob_idx < 0:
                        continue
                    for j in range(n_sample):
                        self.assertNotEqual(sample_indices[i, j], zero_prob_idx,
                                            msg="sampled an index with zero probability")

            # without replacement
            n_row = 3
            for n_col in range(2, 10 + 1, 2):
                prob_dist = make_prob_dist([n_row, n_col], is_contiguous)
                # indices that shouldn't be sampled (<0 means none)
                zero_prob_indices = torch.LongTensor(n_row).random_(-1, n_col).tolist()
                for i, j in enumerate(zero_prob_indices):
                    if j >= 0:
                        prob_dist[i, j] = 0
                n_sample = max(1, n_col - 2)
                sample_indices = torch.multinomial(prob_dist, n_sample, False)
                self.assertEqual(prob_dist.dim(), 2)
                self.assertEqual(sample_indices.size(1), n_sample)
                for i in range(n_row):
                    row_samples = {}
                    zero_prob_idx = zero_prob_indices[i]
                    for j in range(n_sample):
                        sample_idx = sample_indices[i, j]
                        if zero_prob_idx >= 0:
                            self.assertNotEqual(sample_idx, zero_prob_idx,
                                                msg="sampled an index with zero probability")
                        self.assertNotIn(sample_idx, row_samples, "sampled an index twice")
                        row_samples[sample_idx] = True

            # vector
            n_col = 4
            prob_dist = make_prob_dist([n_col], is_contiguous).fill_(1)
            zero_prob_idx = 1  # index that shouldn't be sampled
            prob_dist[zero_prob_idx] = 0
            n_sample = 20
            sample_indices = torch.multinomial(prob_dist, n_sample, True)
            for sample_index in sample_indices:
                self.assertNotEqual(sample_index, zero_prob_idx,
                                    msg="sampled an index with zero probability")
            s_dim = sample_indices.dim()
            self.assertEqual(sample_indices.dim(), 1, msg="wrong number of dimensions")
            self.assertEqual(prob_dist.dim(), 1, msg="wrong number of prob_dist dimensions")
            self.assertEqual(sample_indices.size(0), n_sample, msg="wrong number of samples")

        # CUDA misalignment issue (#46702)
        n_row, n_col = 2, 3
        prob_dist = make_prob_dist([n_row, n_col], True)
        n_sample = 1
        sample_indices = torch.multinomial(prob_dist, n_sample, True)
        self.assertEqual(sample_indices.dim(), 2, msg="wrong number of dimensions")
        self.assertEqual(sample_indices.size(1), n_sample, msg="wrong number of samples")

    @onlyCUDA
    @dtypes(torch.float, torch.double, torch.half)
    def test_multinomial_deterministic(self, device, dtype):
        gen = torch.Generator(device=device)

        trials = 5
        seed = 0
        prob_dist = torch.rand(10000, 1000, device=device, dtype=dtype)
        n_sample = 1

        for i in range(trials):
            gen.manual_seed(seed)
            samples_1 = torch.multinomial(prob_dist, n_sample, True, generator=gen)

            gen.manual_seed(seed)
            samples_2 = torch.multinomial(prob_dist, n_sample, True, generator=gen)

            self.assertEqual(samples_1, samples_2)
            self.assertEqual(samples_1.dim(), 2, msg="wrong number of dimensions")
            self.assertEqual(samples_1.size(1), n_sample, msg="wrong number of samples")

    @slowTest
    @dtypes(torch.float)
    def test_multinomial_rng_state_advance(self, device, dtype):
        corpus_size = 100000
        freqs = torch.ones(corpus_size, dtype=torch.float, device=device)
        n_sample = 100
        samples1 = torch.multinomial(freqs, n_sample, replacement=True)
        samples2 = torch.multinomial(freqs, n_sample, replacement=True)
        samples = torch.cat([samples1, samples2])
        # expect no more than 1 repeating element generated in 2 attempts;
        # the probability of at least one element being repeated is surprisingly large, 18%
        self.assertLessEqual(2 * n_sample - samples.unique().size(0), 2)
        samples1 = torch.multinomial(freqs, n_sample, replacement=False)
        samples2 = torch.multinomial(freqs, n_sample, replacement=False)
        samples = torch.cat([samples1, samples2])
        # expect no more than 1 repeating element generated in 2 attempts
        self.assertLessEqual(2 * n_sample - samples.unique().size(0), 1)
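    # Back-of-the-envelope check of the 18% figure above (standard
    # birthday-problem approximation, my arithmetic): with 2 * 100 draws
    # from 100000 equally likely items,
    #   P(any repeat) ~= 1 - exp(-200 * 199 / (2 * 100000)) ~= 0.18.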
    def _test_memory_format_transformations(self, device, input_generator_fn, transformation_fn,
                                            memory_format, compare_data=True, default_is_preserve=False):

        assert(memory_format == torch.channels_last or memory_format == torch.channels_last_3d)

        # xc is a channels last tensor
        xc = input_generator_fn(device)
        # xc is not memory dense, but looks like channels last
        if memory_format == torch.channels_last:
            xc = xc[..., ::2, ::2]
        else:
            xc = xc[..., ::2, ::2, ::2]

        clone = transformation_fn(xc, memory_format=torch.preserve_format)
        self.assertFalse(clone.is_contiguous())
        self.assertTrue(clone.is_contiguous(memory_format=memory_format))
        self.assertFalse(xc.is_contiguous())
        self.assertFalse(xc.is_contiguous(memory_format=memory_format))
        if compare_data:
            self.assertEqual(xc, clone.to(xc))

        xc = input_generator_fn(device)
        clone = transformation_fn(xc, memory_format=torch.contiguous_format)
        self.assertTrue(clone.is_contiguous())
        self.assertFalse(clone.is_contiguous(memory_format=memory_format))
        if compare_data:
            self.assertEqual(xc, clone.to(xc))

        xc = input_generator_fn(device)
        clone = transformation_fn(xc)

        if default_is_preserve:
            self.assertFalse(clone.is_contiguous())
            self.assertTrue(clone.is_contiguous(memory_format=memory_format))
        else:
            self.assertTrue(clone.is_contiguous())
            self.assertFalse(clone.is_contiguous(memory_format=memory_format))
        if compare_data:
            self.assertEqual(xc, clone.to(xc))

        x = torch.randn((3, 4, 5, 6, 7, 8, 9), device=device)
        for _ in range(10):
            permutation = list(range(len(x.shape)))
            random.shuffle(permutation)
            x = x.permute(permutation)
            self.assertEqual(x.stride(), transformation_fn(x, memory_format=torch.preserve_format).stride())
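    # Summary of the helper above (informal gloss): it drives a
    # transformation_fn through three memory_format arguments --
    # preserve_format must keep the channels-last layout, contiguous_format
    # must densify, and the default behaves one way or the other depending
    # on default_is_preserve; finally it checks strides survive arbitrary
    # permutations under preserve_format.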
    def test_memory_format_to(self, device):
        def get_generator(memory_format, shape):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format)
            return input_generator_fn

        def transformation_fn(tensor, **kwargs):
            return tensor.to(dtype=torch.float64, **kwargs)

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape in formats_shapes:
            self._test_memory_format_transformations(
                device, get_generator(mf, shape), transformation_fn, mf, default_is_preserve=True)

    def test_memory_format_type(self, device):
        def get_generator(memory_format, shape):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format)
            return input_generator_fn

        def transformation_fn(tensor, **kwargs):
            return tensor.to(torch.float64, **kwargs)

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape in formats_shapes:
            self._test_memory_format_transformations(
                device, get_generator(mf, shape), transformation_fn, mf, default_is_preserve=True)

    def test_memory_format_clone(self, device):
        def get_generator(memory_format, shape):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format)
            return input_generator_fn

        def transformation_fn(tensor, **kwargs):
            return tensor.clone(**kwargs)

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape in formats_shapes:
            self._test_memory_format_transformations(
                device, get_generator(mf, shape), transformation_fn, mf, True, default_is_preserve=True)

    def test_memory_format_factory_like_functions_preserve(self, device):
        def get_generator(memory_format, shape):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format)
            return input_generator_fn

        transformation_fns = [
            lambda t, **kwargs: torch.zeros_like(t, **kwargs),
            lambda t, **kwargs: torch.ones_like(t, **kwargs),
            lambda t, **kwargs: torch.randint_like(t, 10, 100, **kwargs),
            lambda t, **kwargs: torch.randint_like(t, 100, **kwargs),
            lambda t, **kwargs: torch.randn_like(t, **kwargs),
            lambda t, **kwargs: torch.rand_like(t, **kwargs),
            lambda t, **kwargs: torch.full_like(t, 7, **kwargs),
            lambda t, **kwargs: torch.empty_like(t, **kwargs)]

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape in formats_shapes:
            for transformation_fn in transformation_fns:
                self._test_memory_format_transformations(
                    device, get_generator(mf, shape), transformation_fn, mf, compare_data=False,
                    default_is_preserve=True)

    def test_memory_format_type_shortcuts(self, device):
        def get_generator(memory_format, shape, dtype):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=dtype).clamp(0, 1) \
                    .round().contiguous(memory_format=memory_format)
            return input_generator_fn

        def get_fn(fn_name):
            def transformation_fn(tensor, **kwargs):
                fn = getattr(tensor, fn_name)
                return fn(**kwargs)
            return transformation_fn

        shortcuts = ['byte', 'char', 'double', 'bool', 'half', 'int', 'long', 'short']
        if device == 'cpu':
            shortcuts += ['bfloat16']

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape in formats_shapes:
            for fn_name in shortcuts:
                self._test_memory_format_transformations(
                    device, get_generator(mf, shape, torch.float32), get_fn(fn_name), mf, default_is_preserve=True)

        # Test 'float' separately to avoid float->float no-op.
        for mf, shape in formats_shapes:
            self._test_memory_format_transformations(
                device, get_generator(mf, shape, torch.float64), get_fn('float'), mf, default_is_preserve=True)

    @onlyCUDA
    def test_memory_format_cpu_and_cuda_ops(self, device):
        def get_generator(memory_format, shape):
            def input_generator_fn(device):
                return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format)
            return input_generator_fn

        def transformation_cpu_fn(tensor, **kwargs):
            return tensor.cpu(**kwargs)

        def transformation_cuda_fn(tensor, **kwargs):
            return tensor.cuda(**kwargs)

        formats_shapes = (
            (torch.channels_last, (4, 3, 8, 8)),
            (torch.channels_last_3d, (4, 3, 8, 8, 8)))

        for mf, shape in formats_shapes:
            self._test_memory_format_transformations(
                'cuda', get_generator(mf, shape), transformation_cpu_fn, mf, default_is_preserve=True)
            self._test_memory_format_transformations(
                'cpu', get_generator(mf, shape), transformation_cuda_fn, mf, default_is_preserve=True)

    @dtypes(torch.complex64, torch.complex128)
    def test_complex_unsupported(self, device, dtype):
        t = torch.tensor((1 + 1j), device=device, dtype=dtype)
        # Note: this is consistent with NumPy
        with self.assertRaises(RuntimeError):
            torch.floor(t)
        with self.assertRaises(RuntimeError):
            torch.ceil(t)
        with self.assertRaises(RuntimeError):
            torch.trunc(t)

        # Tests min and max variants with complex inputs
        # Note: whether PyTorch should support min and max on complex
        # tensors is an open question.
        # See https://github.com/pytorch/pytorch/issues/36374
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.min(t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            t.min()
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.min(t, dim=0)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.min(t, t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.min(t, t, out=t)

        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.max(t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            t.max()
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.max(t, dim=0)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.max(t, t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.max(t, t, out=t)

        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.amin(t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            t.amin()
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.amin(t, dim=0)

        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.amax(t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            t.amax()
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.amax(t, dim=0)

        # Tests _aminmax() variants with complex inputs,
        # which are currently not supported due to min & max being unsupported
        # for complex inputs, as per https://github.com/pytorch/pytorch/issues/36374
        # Test with a single-element tensor t, as well as a multi-element tensor x
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            min_val, max_val = torch._aminmax(t)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            min_val = torch._aminmax(t, dim=0)[0]
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            max_val = torch._aminmax(t, dim=0)[1]

        # Test _aminmax() with a multi-element tensor
        x = torch.tensor([(1 + 1j), (2 + 3j)], device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            min_val, max_val = torch._aminmax(x)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            min_val = torch._aminmax(x, dim=0)[0]
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            max_val = torch._aminmax(x, dim=0)[1]

        # Tests clamp variants with complex inputs
        # Note: whether PyTorch should support clamp on complex
        # tensors is an open question.
        # See https://github.com/pytorch/pytorch/issues/33568
        min_val = 1 + 1j
        max_val = 4 + 4j
        out = torch.empty((0,), device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, min=min_val)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, max=max_val)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, min_val, max_val)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, min=min_val, out=out)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, max=max_val, out=out)
        with self.assertRaisesRegex(RuntimeError, '(.*not support.*)|(.*not implemented.*)'):
            torch.clamp(t, min_val, max_val, out=out)

    def test_pickle_gradscaler(self, device):
        # This test is not in test_cuda.py because it should pass in 3 cases:
        #  1. cuda is not available.
        #  2. cuda is available but device is not cuda.
        #  3. cuda is available and device is cuda.
        # In case 1, a and b disable themselves on construction and shouldn't try to pickle workhorse attributes.
        # In case 2, a and b are enabled.  Workhorse attributes participate in pickling, but none are lazy-inited
        # to cuda Tensors, because I don't want to do cuda things if device is not cuda.
        # In case 3, a and b are enabled and we may also try lazy-initing _scale to a cuda tensor.
        device = torch.device(device)
        try_lazy_inits = (True, False) if device.type == "cuda" else (False,)
        for lazy_init_scale in try_lazy_inits:
            a = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
            self.assertTrue(not a.is_enabled() if torch.cuda.amp.common.amp_definitely_not_available() else a.is_enabled())
            if lazy_init_scale:
                # Dummy a.scale() call lazy-inits a._scale Tensor.
                a.scale(torch.tensor([4.0], dtype=torch.float32, device=device))
                self.assertTrue(isinstance(a._scale, torch.cuda.FloatTensor))
            # The following three lines should work whether or not cuda is available.
            serialized = pickle.dumps(a)
            b = pickle.loads(serialized)
            self.assertEqual(b.is_enabled(), a.is_enabled())
            if a.is_enabled():
                self.assertEqual(b.get_scale(), 3.)
                self.assertEqual(b.get_growth_factor(), 4.)
                self.assertEqual(b.get_backoff_factor(), .5)
                self.assertEqual(b.get_growth_interval(), 2)
                self.assertEqual(b._init_growth_tracker, 0)
                # supplies a dummy key to test the defaultdict's default_factory
                self.assertEqual(b._per_optimizer_states["fdsa"],
                                 torch.cuda.amp.grad_scaler._refresh_per_optimizer_state())
                if lazy_init_scale:
                    self.assertEqual(b.scale(torch.tensor([4.0], dtype=torch.float32, device=device)), 12.0)

    def test_multinomial_invalid(self, device):
        def test(probs):
            with self.assertRaisesRegex(RuntimeError,
                                        'probability tensor contains either `inf`, `nan` or element < 0'):
                torch.multinomial(probs.to(device), 2)
                torch.cuda.synchronize()

        test(torch.tensor([1., -1., 1.]))
        test(torch.tensor([1., inf, 1.]))
        test(torch.tensor([1., -inf, 1.]))
        test(torch.tensor([1., 1., nan]))

    def test_multinomial_invalid_distribution(self, device):
        def test(probs, replacement):
            with self.assertRaisesRegex(RuntimeError,
                                        r"invalid multinomial distribution \(sum of probabilities <= 0\)"):
                torch.multinomial(probs, 2, replacement)
                torch.cuda.synchronize()

        x = torch.zeros(3, device=device)
        y = torch.zeros(3, 3, device=device)
        z = torch.zeros(3, 3, device=device)
        z[1, :] = 1

        test(x, False)
        test(y, False)
        test(z, False)

        # Verify only on CPU, as replacement=True on CUDA triggers a
        # device-side assert.
        if self.device_type == 'cpu':
            test(x, True)
            test(y, True)
            test(z, True)

    def _test_multinomial_empty(self, device, replacement, num_samples):
        probs = torch.ones(0, 3, device=device)
        expected = torch.empty(0, num_samples, dtype=torch.int64)
        out = torch.multinomial(probs, num_samples=num_samples, replacement=replacement)
        self.assertEqual(out, expected)

    def test_multinomial_empty_w_replacement(self, device):
        self._test_multinomial_empty(device, True, 1)
        self._test_multinomial_empty(device, True, 2)

    def test_multinomial_empty_wo_replacement(self, device):
        self._test_multinomial_empty(device, False, 1)
        self._test_multinomial_empty(device, False, 2)

    def _generate_input(self, shape, dtype, device, with_extremal):
        if shape == ():
            x = torch.tensor((), dtype=dtype, device=device)
        else:
            if dtype.is_floating_point or dtype.is_complex:
                # work around torch.randn not being implemented for bfloat16
                if dtype == torch.bfloat16:
                    x = torch.randn(*shape, device=device) * random.randint(30, 100)
                    x = x.to(torch.bfloat16)
                else:
                    x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
                x[torch.randn(*shape) > 0.5] = 0
                if with_extremal and dtype.is_floating_point:
                    # Use extremal values
                    x[torch.randn(*shape) > 0.5] = float('nan')
                    x[torch.randn(*shape) > 0.5] = float('inf')
                    x[torch.randn(*shape) > 0.5] = float('-inf')
                elif with_extremal and dtype.is_complex:
                    x[torch.randn(*shape) > 0.5] = complex('nan')
                    x[torch.randn(*shape) > 0.5] = complex('inf')
                    x[torch.randn(*shape) > 0.5] = complex('-inf')
            elif dtype == torch.bool:
                x = torch.zeros(shape, dtype=dtype, device=device)
                x[torch.randn(*shape) > 0.5] = True
            else:
                x = torch.randint(15, 100, shape, dtype=dtype, device=device)

        return x

    def _test_where_scalar_template(self, device, dtype, exec_fn):
        for with_extremal in [True, False]:
            for ndims in range(0, 4):
                shape = self._rand_shape(ndims, min_size=5, max_size=10)
                for n in range(ndims + 1):
                    for c in combinations(list(range(ndims)), n):
                        for scalar_type in [int, float, complex]:
                            if dtype.is_complex:
                                condition = self._generate_input(shape, dtype, device, with_extremal).abs() > 0.5
                            else:
                                condition = self._generate_input(shape, dtype, device, with_extremal) > 0.5

                            x = self._generate_input(shape, dtype, device, with_extremal)

                            if not dtype.is_complex and scalar_type == complex:
                                continue

                            scalar_1 = scalar_type(random.random())

                            exec_fn(scalar_type, dtype, condition, x, scalar_1)

    # For the current implementation, below are the valid
    # `TensorDtype` and `ScalarType` combinations.
    def _where_valid_scalar_tensor_combination(self, scalar_type, dtype):
        if (scalar_type == int and dtype == torch.long):
            return True
        elif (scalar_type == float and dtype == torch.double):
            return True
        elif (scalar_type == complex and dtype == torch.complex128):
            return True
        return False
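    # For instance (my illustration, mirroring the table above):
    # torch.where(cond, double_tensor, 1.5) is accepted, while
    # torch.where(cond, float_tensor, 1.5) raises "expected scalar type",
    # because a Python float is treated as torch.double here rather than
    # being promoted.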
    @onlyOnCPUAndCUDA
    @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes() +
              torch.testing.get_all_complex_dtypes()))
    def test_where_scalar_invalid_combination_raises(self, device, dtype):
        def checkRaises(scalar_type, dtype, condition, x, scalar_1):
            if not self._where_valid_scalar_tensor_combination(scalar_type, dtype):
                # Note: This should fail once `where` supports type promotion.
                with self.assertRaisesRegex(RuntimeError, "expected scalar type"):
                    torch.where(condition, x, scalar_1)

        self._test_where_scalar_template(device, dtype, checkRaises)

    @skipCUDAVersionIn([(11, 2)])  # test fails for 11.2, see https://github.com/pytorch/pytorch/issues/51980
    @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes() +
              torch.testing.get_all_complex_dtypes()))
    def test_where_scalar_valid_combination(self, device, dtype):
        def checkResult(scalar_type, dtype, condition, x, scalar_1):
            if self._where_valid_scalar_tensor_combination(scalar_type, dtype):
                def x_like(scalar, without_dtype=False):
                    return torch.tensor(scalar, dtype=dtype, device=device).expand_as(x)

                # X = Tensor, Y = Scalar
                scalar_out = torch.where(condition, x, scalar_1)
                tensor_out = torch.where(condition, x, x_like(scalar_1))
                self.assertEqual(scalar_out, tensor_out)

                # X = Scalar, Y = Tensor
                scalar_out = torch.where(condition, scalar_1, x)
                tensor_out = torch.where(condition, x_like(scalar_1), x)
                self.assertEqual(scalar_out, tensor_out)

        self._test_where_scalar_template(device, dtype, checkResult)

    # Restricted to CPU and CUDA because on XLA the expected RuntimeError
    # is not raised.
    @onlyOnCPUAndCUDA
    def test_where_scalar_scalar(self, device):
        # Scalar-Scalar Version
        height = 5
        width = 5
        default_dtype = torch.get_default_dtype()
        for test_default_dtype in [torch.float, torch.double]:
            torch.set_default_dtype(test_default_dtype)
            for scalar_type_1 in [int, float, complex]:
                for scalar_type_2 in [int, float, complex]:
                    x1 = scalar_type_1(random.random() * random.randint(10, 20))
                    x2 = scalar_type_2(random.random() * random.randint(20, 30))
                    condition = torch.randn(height, width, device=device) > 0.5
                    if scalar_type_1 != scalar_type_2:
                        self.assertRaisesRegex(RuntimeError,
                                               "expected scalar type",
                                               lambda: torch.where(condition, x1, x2))
                    else:
                        def get_dtype(scalar_type):
                            complex_dtype = torch.complex64 if torch.float == torch.get_default_dtype() else torch.complex128
                            type_map = {int: torch.long, float: torch.get_default_dtype(),
                                        complex: complex_dtype}
                            return type_map[scalar_type]
                        expected = torch.zeros((height, width), dtype=get_dtype(scalar_type_1))
                        expected[condition] = x1
                        expected[~condition] = x2
                        result = torch.where(condition, x1, x2)
                        self.assertEqual(expected, result)

        # Reset the original dtype
        torch.set_default_dtype(default_dtype)

# Tests that compare a device's computation with the (gold-standard) CPU's.
class TestDevicePrecision(TestCase):
    exact_dtype = True

    @onlyCUDA
    def test_index_add_bfloat16(self, device):
        inp_tensor = torch.randn(5, 3, device='cpu').bfloat16()
        t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.bfloat16, device='cpu')
        index = torch.tensor([0, 4, 2], device='cpu')
        out_cpu = inp_tensor.index_add(0, index, t)

        inp_tensor = inp_tensor.to(device=device)
        t = t.to(device=device)
        index = index.to(device=device)
        out_gpu = inp_tensor.index_add(0, index, t)

        self.assertEqual(out_cpu, out_gpu, atol=1e-2, rtol=0)

    def test_device_serialization(self, device):
        x = torch.randn(4, 4, device=device)

        with tempfile.NamedTemporaryFile() as f:
            torch.save(x, f)
            f.seek(0)
            x_copy = torch.load(f)

        self.assertEqual(x_copy, x)
        self.assertIs(type(x_copy), type(x))
        self.assertEqual(x_copy.device, x.device)

    @deviceCountAtLeast(2)
    def test_multidevice_serialization(self, devices):
        x = [torch.randn(4, 4, device=devices[0]),
             torch.randn(4, 4, device=devices[1])]

        with tempfile.NamedTemporaryFile() as f:
            torch.save(x, f)
            f.seek(0)
            x_copy = torch.load(f)

        for original, cp in zip(x, x_copy):
            self.assertEqual(cp, original)
            self.assertIs(type(cp), type(original))
            self.assertEqual(cp.device, original.device)

    @deviceCountAtLeast(1)
    def test_copy_noncontig(self, devices):
        def do_test(d0, d1):
            x = torch.tensor([1.5, 2.5, 3.5, 4.5, 5.5, 6.5], device=d0)
            y = torch.tensor([0, 0, 0, 0, 0, 0], device=d1)
            self.assertNotEqual(x.dtype, y.dtype)

            y[::2].copy_(x[::2])
            self.assertEqual(y, [1, 0, 3, 0, 5, 0])

        do_test('cpu', devices[0])
        do_test(devices[0], 'cpu')

        if len(devices) > 1:
            do_test(devices[0], devices[1])

    @deviceCountAtLeast(2)
    def test_type_conversions_same_device(self, devices):
        x = torch.randn(5, 5, device=devices[1])
        self.assertEqual(x.int().device, torch.device(devices[1]))
        self.assertEqual(x.type(torch.int).device, torch.device(devices[1]))
        self.assertEqual(x.to(torch.int).device, torch.device(devices[1]))

    @dtypesIfCUDA(torch.half, torch.float, torch.double,
                  torch.int8, torch.short, torch.int, torch.long,
                  torch.uint8)
    @dtypes(torch.float, torch.double,
            torch.int8, torch.short, torch.int, torch.long,
            torch.uint8)
    def test_from_sequence(self, device, dtype):
        seq = [list(range(i * 4, i * 4 + 4)) for i in range(5)]
        reference = torch.arange(0, 20).resize_(5, 4)
        self.assertEqual(torch.tensor(seq, dtype=dtype, device=device), reference, exact_dtype=False)

    @deviceCountAtLeast(1)
    def test_advancedindex_mixed_cpu_devices(self, devices) -> None:
        def test(x: torch.Tensor, ia: torch.Tensor, ib: torch.Tensor) -> None:
            # test getitem
            self.assertEqual(x[:, ia, None, ib, 0].cpu(),
                             x.cpu()[:, ia.cpu(), None, ib.cpu(), 0])
            self.assertEqual(x[ia], x.cpu()[ia.cpu()])
            # test setitem
            x_clone1 = x.clone()
            x_clone2 = x.clone()
            first_shape = x[:, ia, None, ib, 0].shape
            second_shape = x[ia].shape
            x_clone1[:, ia, None, ib, 0] = torch.randn(first_shape).to(x_clone1)
            x_clone2[ia] = torch.randn(second_shape).to(x_clone2)

        cpu = torch.device('cpu')
        for device in devices:
            # Index cpu tensor with device tensor
            x = torch.randn(3, 4, 4, 4, 3)
            ia = torch.tensor([0, 2, 1]).to(device)
            ib = torch.tensor([0, 2, 1]).to(device)
            test(x, ia, ib)

            # Index device tensor with cpu tensor
            x = x.to(device)
            ia = ia.to(cpu)
            ib = ib.to(cpu)
            test(x, ia, ib)

            # Index cpu tensor with mixed cpu, device tensors
            x = x.to(cpu)
            ia = ia.to(cpu)
            ib = ib.to(device)
            test(x, ia, ib)

            # Index device tensor with mixed cpu, device tensors
            x = x.to(device)
            ia = ia.to(cpu)
            ib = ib.to(device)
            test(x, ia, ib)

            if len(devices) > 1:
                other_device = devices[0]
devices[0]: other_device = devices[1] # Index device tensor with mixed cpu, device tensors on different devices x = x.to(device) ia = ia.to(cpu) ib = ib.to(other_device) test(x, ia, ib) def test_copy_broadcast(self, device) -> None: x = torch.randn(10, 5) y = torch.randn(5, device=device) x.copy_(y) self.assertEqual(x[3], y) x = torch.randn(10, 5, device=device) y = torch.randn(5) x.copy_(y) self.assertEqual(x[3], y) @dtypes(torch.int64, torch.float32, torch.float64) def test_clamp(self, device, dtype): test_args = [ *product( [(100, 50), (10, 64), (97,)], # shape (True, False), # non-contiguous ) ] for shape, noncontig in test_args: x = make_tensor(shape, device=device, dtype=dtype, noncontiguous=noncontig) ub = make_tensor(shape, device=device, dtype=dtype, noncontiguous=noncontig) lb = make_tensor(shape, device=device, dtype=dtype, noncontiguous=noncontig) expect = x.max(lb).min(ub) actual = x.clamp(lb, ub) self.assertEqual(expect, actual) expect = np.clip(x.cpu().numpy(), lb.cpu().numpy(), ub.cpu().numpy()) self.assertEqual(expect, actual) expect = x.max(lb) actual = x.clamp(min=lb) self.assertEqual(expect, actual) expect = x.min(ub) actual = x.clamp(max=ub) self.assertEqual(expect, actual) # Test broadcasting min & max expect = x.max(lb[0]).min(ub[..., :1]) actual = x.clamp(lb[0], ub[..., :1]) self.assertEqual(expect, actual) # Test broadcasting x expect = x[..., :1].max(lb).min(ub) actual = x[..., :1].clamp(lb, ub) self.assertEqual(expect, actual) # we implemented custom deallocation for subclasses, so it behooves # us to make sure all of these bits work. We'll use __del__ to # track if objects die or not class Tracker: def __init__(self, marker): self.marker = marker @staticmethod def make(): marker = [False] return marker, Tracker(marker) def __del__(self): self.marker[0] = True @contextlib.contextmanager def disable_gc(): if gc.isenabled(): try: gc.disable() yield finally: gc.enable() else: yield class TestTorch(AbstractTestCases._TestTorchMixin): exact_dtype = True def test_tensor_ctor_scalar(self): x = torch.Tensor(torch.tensor(1.0)) self.assertEqual(x, torch.tensor(1.0)) def test_deepcopy_gradient(self): from copy import deepcopy a = torch.zeros(10) a.grad = torch.ones(10) self.assertEqual(a.grad, deepcopy(a).grad) s = torch.zeros(10).to_sparse() s.grad = torch.ones(10).to_sparse() self.assertEqual(s.grad, deepcopy(s).grad) # ensure sharing is not broken c = deepcopy([a, a.grad]) self.assertTrue(c[0].grad is c[1]) def test_tensor_base_init(self): # Direct construction not OK self.assertRaises(RuntimeError, lambda: torch._C._TensorBase()) # But construction of subclass is OK class T(torch._C._TensorBase): pass T() def test_tensor_base_new(self): # OK to call super().__new__, see # https://github.com/pytorch/pytorch/issues/57421 class TestTensor(torch._C._TensorBase): @staticmethod def __new__(cls, x, *args, **kwargs): return super().__new__(cls, x, *args, **kwargs) x = torch.ones(5) test_tensor = TestTensor(x) def test_pyobj_preserved(self): x = torch.empty(2) x.foo = 2 # put something on __dict__ y = torch.empty(2) y.grad = x del x # x is dead in Python self.assertEqual(y.grad.foo, 2) z = y.grad # it's live del z # it's dead again self.assertEqual(y.grad.foo, 2) def test_subclass_preserved(self): class MyTensor(torch._C._TensorBase): pass x = MyTensor(torch.empty(2)) y = torch.empty(2) y.grad = x del x # x is dead in Python self.assertEqual(type(y.grad), MyTensor) z = y.grad # it's live del z # it's dead again self.assertEqual(type(y.grad), MyTensor) def 
test_tensor_slot_dealloc(self): class SlotTensor1(torch._C._TensorBase): __slots__ = ['slot1'] class SlotTensor2(SlotTensor1): __slots__ = ['slot2'] m1, t1 = Tracker.make() m2, t2 = Tracker.make() slot_tensor = SlotTensor2(torch.empty(2)) slot_tensor.slot1 = t1 slot_tensor.slot2 = t2 del t1 del t2 self.assertFalse(m1[0]) self.assertFalse(m2[0]) del slot_tensor self.assertTrue(m1[0]) self.assertTrue(m2[0]) def test_tensor_dict_dealloc(self): m, t = Tracker.make() x = torch.empty(2) x.arf = t del t self.assertFalse(m[0]) del x self.assertTrue(m[0]) def test_tensor_finalizer_dealloc(self): m = [False] class FinalizerTensor(torch._C._TensorBase): def __del__(self): m[0] = True fin_tensor = FinalizerTensor(torch.empty(2)) self.assertFalse(m[0]) del fin_tensor self.assertTrue(m[0]) def test_tensor_weakref_dealloc(self): x = torch.empty(2) m = [False] def cb(r): m[0] = True wref = weakref.ref(x, cb) del x self.assertTrue(m[0]) self.assertEqual(wref(), None) def test_tensor_cycle_via_dict(self): m1, t1 = Tracker.make() x = torch.empty(2) x._tracker = t1 del t1 m2, t2 = Tracker.make() y = torch.empty(2) y._tracker = t2 del t2 x._loop = y y._loop = x # C++ reference should keep the cycle live! # This exercises THPVariable_subtype_traverse # NB: Because z.grad is a reference done entirely in C++, cycles # involving it directly are NOT broken by Python GC; you've # set up a good old C++ reference cycle which we cannot safely # break (because C++ references are allowed to be accessed # multithreaded-ly) (TODO: except maybe if you can prove that # only Python has access to the C++ object, in which case you can # also prove that no multithreaded access occurs) z = torch.empty(2) z.grad = x del x del y gc.collect() self.assertFalse(m1[0]) self.assertFalse(m2[0]) with disable_gc(): del z self.assertFalse(m1[0]) self.assertFalse(m2[0]) gc.collect() self.assertTrue(m1[0]) self.assertTrue(m2[0]) def test_tensor_cycle_via_slots(self): m1 = [False] m2 = [False] class SlotTensor1(torch._C._TensorBase): __slots__ = ['slot1'] def __del__(self): m1[0] = True class SlotTensor2(SlotTensor1): __slots__ = ['slot2'] def __del__(self): m2[0] = True x = SlotTensor1(torch.empty(2)) y = SlotTensor2(torch.empty(2)) x.slot1 = y y.slot2 = x del x with disable_gc(): del y self.assertFalse(m1[0]) self.assertFalse(m2[0]) gc.collect() self.assertTrue(m1[0]) self.assertTrue(m2[0]) def test_backward_hooks_traverse(self): m1, t1 = Tracker.make() m2, t2 = Tracker.make() x = torch.empty(2, requires_grad=True) x._tracker = t1 y = torch.empty(2, requires_grad=True) y._tracker = t2 del t1 del t2 # this hits a special setter, it's not just a __dict__ entry x._backward_hooks = y y._backward_hooks = x del x with disable_gc(): del y self.assertFalse(m1[0]) self.assertFalse(m2[0]) gc.collect() self.assertTrue(m1[0]) self.assertTrue(m2[0]) def test_dead_weak_ref(self): x = torch.empty(2) w_x = weakref.ref(x) y = torch.empty(2) y.grad = x del x x = w_x() # Ideally, x would keep the tensor live. But CPython doesn't # provide enough hooks to do this. So it will go dead and x # will transmute into an undefined tensor. Not great, but the # best we can do.
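# Concretely: `x` here is the object returned by w_x() above; deleting `y`
# below drops the `grad` reference that was keeping the underlying C++ tensor
# alive, so the method call in the assertion that follows is expected to raise
# a RuntimeError on the now-undefined tensor.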
del y self.assertRaises(RuntimeError, lambda: x.sigmoid()) def test_resurrected_weak_ref(self): x = torch.empty(2) w_x = weakref.ref(x) y = torch.empty(2) y.grad = x del x x = w_x() # Use this to manually fix weak references after dereferencing them x._fix_weakref() del y x.sigmoid() @torch.inference_mode() def test_bmm_multithreaded(self): device = 'cpu' num_threads = torch.get_num_threads() torch.set_num_threads(4) batch_sizes = [1, 10] M, N, O = 23, 8, 12 dtype = torch.float32 numpy_dtype = dtype def invert_perm(p): d = {x: i for i, x in enumerate(p)} return (d[0], d[1], d[2]) def generate_inputs(num_batches): # transposed tensors for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2): b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1) b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1) b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1)) b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2)) yield b1, b2 # broadcasting tensors for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6): shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1) shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1) b1 = make_tensor(shape1, device, dtype, low=-1, high=1).expand(num_batches, M, N) b2 = make_tensor(shape2, device, dtype, low=-1, high=1).expand(num_batches, N, O) yield b1, b2 # zero-sized tensors for z1, z2, z3, z4 in itertools.product((True, False), repeat=4): shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0) shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0) b1 = torch.randn(shape1, dtype=dtype, device=device) b2 = torch.randn(shape2, dtype=dtype, device=device) yield b1, b2 try: for num_batches in batch_sizes: for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches), itertools.permutations((0, 1, 2))): res1 = torch.bmm(b1, b2) res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \ .permute(perm3).contiguous().permute(invert_perm(perm3)) torch.bmm(b1, b2, out=res2) expect = torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype) self.assertEqual(expect, res1) self.assertEqual(expect, res2) finally: torch.set_num_threads(num_threads) # TODO: these empty classes are temporarily instantiated for XLA compatibility # once XLA updates their test suite they should be removed class TestViewOps(TestCase): pass class TestTensorDeviceOps(TestCase): pass # Generates tests # Note: test generation must be done at file scope, not within main, or # pytest will fail. add_neg_dim_tests() instantiate_device_type_tests(TestViewOps, globals()) instantiate_device_type_tests(TestVitalSignsCuda, globals()) instantiate_device_type_tests(TestTensorDeviceOps, globals()) instantiate_device_type_tests(TestTorchDeviceType, globals()) instantiate_device_type_tests(TestDevicePrecision, globals(), except_for='cpu') if __name__ == '__main__': run_tests()
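# A minimal standalone sketch (not part of the test suite above) of the
# scalar/tensor equivalence that test_where_scalar_valid_combination verifies:
# for a valid scalar/dtype pairing (here a Python float with torch.double),
# torch.where with a Python scalar should match torch.where with that scalar
# materialized as a tensor of the same dtype.
import torch

cond = torch.randn(3, 3) > 0                      # boolean condition mask
x = torch.randn(3, 3, dtype=torch.double)
scalar = 0.5                                      # float scalar pairs with torch.double
out_scalar = torch.where(cond, x, scalar)
out_tensor = torch.where(cond, x, torch.full_like(x, scalar))
assert torch.equal(out_scalar, out_tensor)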
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """nn.Module with additional great features.""" import collections import copy import inspect import logging import os import tempfile import types import uuid from abc import ABC from argparse import Namespace from functools import partial from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import torch from torch import ScriptModule, Tensor from torch.nn import Module from torch.optim.optimizer import Optimizer from pytorch_lightning.core.grads import GradInformation from pytorch_lightning.core.hooks import CheckpointHooks, DataHooks, ModelHooks from pytorch_lightning.core.memory import ModelSummary from pytorch_lightning.core.optimizer import LightningOptimizer from pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, ModelIO, PRIMITIVE_TYPES from pytorch_lightning.core.step_result import Result from pytorch_lightning.utilities import rank_zero_deprecation, rank_zero_warn from pytorch_lightning.utilities.apply_func import apply_to_collection, convert_to_tensors from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, save_hyperparameters from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT log = logging.getLogger(__name__) class LightningModule( ABC, DeviceDtypeModuleMixin, GradInformation, ModelIO, ModelHooks, DataHooks, CheckpointHooks, Module, ): # Below is for property support of JIT in PyTorch 1.7 # since none of these are important when using JIT, we are going to ignore them. 
__jit_unused_properties__ = [ "datamodule", "example_input_array", "hparams", "hparams_initial", "on_gpu", "current_epoch", "global_step", "global_rank", "local_rank", "logger", "model_size", "automatic_optimization", "truncated_bptt_steps", ] + DeviceDtypeModuleMixin.__jit_unused_properties__ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) # see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/ # torch/nn/modules/module.py#L227) torch._C._log_api_usage_once(f"lightning.module.{self.__class__.__name__}") self.loaded_optimizer_states_dict = {} #: Pointer to the trainer object self.trainer = None self._distrib_type = None self._device_type = None #: True if using amp self.use_amp: bool = False #: The precision used self.precision: int = 32 # optionally can be set by user self._example_input_array = None self._datamodule = None self._results: Optional[Result] = None self._current_fx_name: str = '' self._running_manual_backward: bool = False self._current_hook_fx_name: Optional[str] = None self._current_dataloader_idx: Optional[int] = None self._automatic_optimization: bool = True self._truncated_bptt_steps: int = 0 self._param_requires_grad_state = dict() def optimizers(self, use_pl_optimizer: bool = True) -> Union[Optimizer, List[Optimizer], List[LightningOptimizer]]: if use_pl_optimizer: opts = list(self.trainer.lightning_optimizers.values()) else: opts = self.trainer.optimizers # single optimizer if isinstance(opts, list) and len(opts) == 1 and isinstance(opts[0], Optimizer): return opts[0] # multiple opts return opts def lr_schedulers(self) -> Optional[Union[Any, List[Any]]]: if not self.trainer.lr_schedulers: return None # ignore other keys "interval", "frequency", etc. lr_schedulers = [s["scheduler"] for s in self.trainer.lr_schedulers] # single scheduler if len(lr_schedulers) == 1: return lr_schedulers[0] # multiple schedulers return lr_schedulers @property def example_input_array(self) -> Any: return self._example_input_array @property def current_epoch(self) -> int: """The current epoch""" return self.trainer.current_epoch if self.trainer else 0 @property def global_step(self) -> int: """Total training batches seen across all epochs""" return self.trainer.global_step if self.trainer else 0 @property def global_rank(self) -> int: """ The index of the current process across all nodes and devices. """ return self.trainer.global_rank if self.trainer else 0 @property def local_rank(self) -> int: """ The index of the current process within a single node. """ return self.trainer.local_rank if self.trainer else 0 @example_input_array.setter def example_input_array(self, example: Any) -> None: self._example_input_array = example @property def datamodule(self) -> Any: rank_zero_deprecation( "The `LightningModule.datamodule` property is deprecated in v1.3 and will be removed in v1.5." " Access the datamodule through using `self.trainer.datamodule` instead." ) return self._datamodule @datamodule.setter def datamodule(self, datamodule: Any) -> None: self._datamodule = datamodule @property def on_gpu(self): """ True if your model is currently running on GPUs. Useful to set flags around the LightningModule for different CPU vs GPU behavior. """ return self.device.type == "cuda" @property def automatic_optimization(self) -> bool: """ If False you are responsible for calling .backward, .step, zero_grad. 
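For illustration, a minimal manual-optimization sketch (``compute_loss`` here is a hypothetical helper, not a Lightning API):

        Example::

            def __init__(self):
                super().__init__()
                self.automatic_optimization = False

            def training_step(self, batch, batch_idx):
                opt = self.optimizers()
                opt.zero_grad()
                loss = self.compute_loss(batch)
                self.manual_backward(loss)
                opt.step()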
""" return self._automatic_optimization @automatic_optimization.setter def automatic_optimization(self, automatic_optimization: bool) -> None: self._automatic_optimization = automatic_optimization @property def truncated_bptt_steps(self) -> int: """ truncated_bptt_steps: Truncated back prop breaks performs backprop every k steps of much a longer sequence. If this is > 0, the training step is passed ``hiddens``. """ return self._truncated_bptt_steps @truncated_bptt_steps.setter def truncated_bptt_steps(self, truncated_bptt_steps: int) -> None: self._truncated_bptt_steps = truncated_bptt_steps @property def logger(self): """ Reference to the logger object in the Trainer. """ return self.trainer.logger if self.trainer else None def _apply_batch_transfer_handler(self, batch: Any, device: Optional[torch.device] = None, dataloader_idx: int = 0): batch = self.on_before_batch_transfer(batch, dataloader_idx) batch = self.transfer_batch_to_device(batch, device) batch = self.on_after_batch_transfer(batch, dataloader_idx) return batch def print(self, *args, **kwargs) -> None: r""" Prints only from process 0. Use this in any distributed mode to log only once. Args: *args: The thing to print. The same as for Python's built-in print function. **kwargs: The same as for Python's built-in print function. Example:: def forward(self, x): self.print(x, 'in forward') """ if self.trainer.is_global_zero: progress_bar = self.trainer.progress_bar_callback if progress_bar is not None and progress_bar.is_enabled: progress_bar.print(*args, **kwargs) else: print(*args, **kwargs) def log( self, name: str, value: Any, prog_bar: bool = False, logger: bool = True, on_step: Optional[bool] = None, on_epoch: Optional[bool] = None, reduce_fx: Callable = torch.mean, tbptt_reduce_fx: Callable = torch.mean, tbptt_pad_token: int = 0, enable_graph: bool = False, sync_dist: bool = False, sync_dist_op: Union[Any, str] = 'mean', sync_dist_group: Optional[Any] = None, add_dataloader_idx: bool = True, ): """ Log a key, value Example:: self.log('train_loss', loss) The default behavior per hook is as follows .. csv-table:: ``*`` also applies to the test loop :header: "LightningModule Hook", "on_step", "on_epoch", "prog_bar", "logger" :widths: 20, 10, 10, 10, 10 "training_step", "T", "F", "F", "T" "training_step_end", "T", "F", "F", "T" "training_epoch_end", "F", "T", "F", "T" "validation_step*", "F", "T", "F", "T" "validation_step_end*", "F", "T", "F", "T" "validation_epoch_end*", "F", "T", "F", "T" Args: name: key name value: value name prog_bar: if True logs to the progress bar logger: if True logs to the logger on_step: if True logs at this step. None auto-logs at the training_step but not validation/test_step on_epoch: if True logs epoch accumulated metrics. None auto-logs at the val/test step but not training_step reduce_fx: reduction function over step values for end of epoch. Torch.mean by default tbptt_reduce_fx: function to reduce on truncated back prop tbptt_pad_token: token to use for padding enable_graph: if True, will not auto detach the graph sync_dist: if True, reduces the metric across GPUs/TPUs sync_dist_op: the op to sync across GPUs/TPUs sync_dist_group: the ddp group to sync across add_dataloader_idx: if True, appends the index of the current dataloader to the name (when using multiple). 
If False, user needs to give unique names for each dataloader to not mix values """ if self._results is not None: # in any epoch end can't log step metrics (only epoch metric) if 'epoch_end' in self._current_fx_name and on_step: m = f'on_step=True cannot be used on {self._current_fx_name} method' raise MisconfigurationException(m) if 'epoch_end' in self._current_fx_name and on_epoch is False: m = f'on_epoch cannot be False when called from the {self._current_fx_name} method' raise MisconfigurationException(m) # add log_dict # TODO: if logged twice fail with crash # set the default depending on the fx_name on_step = self.__auto_choose_log_on_step(on_step) on_epoch = self.__auto_choose_log_on_epoch(on_epoch) if self._current_hook_fx_name is not None: self.trainer.logger_connector.check_logging_in_callbacks( self._current_hook_fx_name, on_step=on_step, on_epoch=on_epoch ) # make sure user doesn't introduce logic for multi-dataloaders if "/dataloader_idx_" in name: raise MisconfigurationException( f"Logged key: {name} should not contain information about dataloader_idx." ) training_type_plugin = self.trainer.training_type_plugin # Determine if dataloader index should be added dataloader_idx = self._current_dataloader_idx if add_dataloader_idx else None self._results.log( name, value, prog_bar, logger, on_step, on_epoch, reduce_fx, tbptt_reduce_fx, tbptt_pad_token, enable_graph, sync_dist, sync_dist_op, sync_dist_group, training_type_plugin.reduce, dataloader_idx, self.device, ) def log_dict( self, dictionary: dict, prog_bar: bool = False, logger: bool = True, on_step: Optional[bool] = None, on_epoch: Optional[bool] = None, reduce_fx: Callable = torch.mean, tbptt_reduce_fx: Callable = torch.mean, tbptt_pad_token: int = 0, enable_graph: bool = False, sync_dist: bool = False, sync_dist_op: Union[Any, str] = 'mean', sync_dist_group: Optional[Any] = None, add_dataloader_idx: bool = True, ): """ Log a dictionary of values at once Example:: values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n} self.log_dict(values) Args: dictionary: key value pairs (str, tensors) prog_bar: if True logs to the progress bar logger: if True logs to the logger on_step: if True logs at this step. None auto-logs for training_step but not validation/test_step on_epoch: if True logs epoch accumulated metrics. None auto-logs for val/test step but not training_step reduce_fx: reduction function over step values for end of epoch. Torch.mean by default tbptt_reduce_fx: function to reduce on truncated back prop tbptt_pad_token: token to use for padding enable_graph: if True, will not auto detach the graph sync_dist: if True, reduces the metric across GPUs/TPUs sync_dist_op: the op to sync across GPUs/TPUs sync_dist_group: the ddp group to sync across add_dataloader_idx: if True, appends the index of the current dataloader to the name (when using multiple).
If False, user needs to give unique names for each dataloader to not mix values """ for k, v in dictionary.items(): self.log( name=k, value=v, prog_bar=prog_bar, logger=logger, on_step=on_step, on_epoch=on_epoch, reduce_fx=reduce_fx, enable_graph=enable_graph, sync_dist=sync_dist, sync_dist_group=sync_dist_group, sync_dist_op=sync_dist_op, tbptt_pad_token=tbptt_pad_token, tbptt_reduce_fx=tbptt_reduce_fx, add_dataloader_idx=add_dataloader_idx ) def write_prediction( self, name: str, value: Union[torch.Tensor, List[torch.Tensor]], filename: str = 'predictions.pt' ): """ Write predictions to disk using ``torch.save`` Example:: self.write_prediction('pred', torch.tensor(...), filename='my_predictions.pt') Args: name: a string indicating the name to save the predictions under value: the predictions, either a single :class:`~torch.Tensor` or a list of them filename: name of the file to save the predictions to Note: when running in distributed mode, calling ``write_prediction`` will create a file for each device with respective names: ``filename_rank_0.pt``, ``filename_rank_1.pt``, ... .. deprecated:: v1.3 Will be removed in v1.5.0. """ rank_zero_deprecation( 'LightningModule method `write_prediction` was deprecated in v1.3' ' and will be removed in v1.5.' ) self.trainer.evaluation_loop.predictions._add_prediction(name, value, filename) def write_prediction_dict(self, predictions_dict: Dict[str, Any], filename: str = 'predictions.pt'): """ Write a dictionary of predictions to disk at once using ``torch.save`` Example:: pred_dict = {'pred1': torch.tensor(...), 'pred2': torch.tensor(...)} self.write_prediction_dict(pred_dict) Args: predictions_dict: dict containing predictions, where each prediction should either be a single :class:`~torch.Tensor` or a list of them Note: when running in distributed mode, calling ``write_prediction_dict`` will create a file for each device with respective names: ``filename_rank_0.pt``, ``filename_rank_1.pt``, ... .. deprecated:: v1.3 Will be removed in v1.5.0. """ rank_zero_deprecation( 'LightningModule method `write_prediction_dict` was deprecated in v1.3 and' ' will be removed in v1.5.' ) for k, v in predictions_dict.items(): self.write_prediction(k, v, filename) def __auto_choose_log_on_step(self, on_step): if on_step is None: if self._current_fx_name in {'training_step', 'training_step_end'}: on_step = True elif self._current_fx_name in { 'evaluation_step', 'evaluation_step_end', 'evaluation_epoch_end', 'training_epoch_end' }: on_step = False else: on_step = False return on_step def __auto_choose_log_on_epoch(self, on_epoch): if on_epoch is None: if self._current_fx_name in {'training_step', 'training_step_end'}: on_epoch = False elif self._current_fx_name in { 'evaluation_step', 'evaluation_step_end', 'evaluation_epoch_end', 'training_epoch_end' }: on_epoch = True else: on_epoch = True return on_epoch def all_gather( self, data: Union[torch.Tensor, Dict, List, Tuple], group: Optional[Any] = None, sync_grads: bool = False, ): r""" Allows users to call ``self.all_gather()`` from the LightningModule, thus making the ``all_gather`` operation accelerator agnostic. ``all_gather`` is a function provided by accelerators to gather a tensor from several distributed processes. Args: data: int, float, tensor of shape (batch, ...), or a (possibly nested) collection thereof. group: the process group to gather results from.
Defaults to all processes (world) sync_grads: flag that allows users to synchronize gradients for all_gather op Return: A tensor of shape (world_size, batch, ...), or if the input was a collection the output will also be a collection with tensors of this shape. """ group = group if group is not None else torch.distributed.group.WORLD all_gather = self.trainer.accelerator.all_gather data = convert_to_tensors(data, device=self.device) all_gather = partial(all_gather, group=group, sync_grads=sync_grads) return apply_to_collection(data, torch.Tensor, all_gather) def forward(self, *args, **kwargs) -> Any: r""" Same as :meth:`torch.nn.Module.forward()`. Args: *args: Whatever you decide to pass into the forward method. **kwargs: Keyword arguments are also possible. Return: Your model's output """ return super().forward(*args, **kwargs) def training_step(self, *args, **kwargs) -> STEP_OUTPUT: r""" Here you compute and return the training loss and some additional metrics for e.g. the progress bar or logger. Args: batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]): The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list. batch_idx (int): Integer displaying index of this batch optimizer_idx (int): When using multiple optimizers, this argument will also be present. hiddens(:class:`~torch.Tensor`): Passed in if :paramref:`~pytorch_lightning.core.lightning.LightningModule.truncated_bptt_steps` > 0. Return: Any of. - :class:`~torch.Tensor` - The loss tensor - ``dict`` - A dictionary. Can include any keys, but must include the key ``'loss'`` - ``None`` - Training will skip to the next batch Note: Returning ``None`` is currently not supported for multi-GPU or TPU, or with 16-bit precision enabled. In this step you'd normally do the forward pass and calculate the loss for a batch. You can also do fancier things like multiple forward passes or something model specific. Example:: def training_step(self, batch, batch_idx): x, y, z = batch out = self.encoder(x) loss = self.loss(out, x) return loss If you define multiple optimizers, this step will be called with an additional ``optimizer_idx`` parameter. .. code-block:: python # Multiple optimizers (e.g.: GANs) def training_step(self, batch, batch_idx, optimizer_idx): if optimizer_idx == 0: # do training_step with encoder if optimizer_idx == 1: # do training_step with decoder If you add truncated back propagation through time you will also get an additional argument with the hidden states of the previous step. .. code-block:: python # Truncated back-propagation through time def training_step(self, batch, batch_idx, hiddens): # hiddens are the hidden states from the previous truncated backprop step ... out, hiddens = self.lstm(data, hiddens) ... return {'loss': loss, 'hiddens': hiddens} Note: The loss value shown in the progress bar is smoothed (averaged) over the last values, so it differs from the actual loss returned in train/validation step. """ rank_zero_warn("`training_step` must be implemented to be used with the Lightning Trainer") def training_step_end(self, *args, **kwargs) -> STEP_OUTPUT: """ Use this when training with dp or ddp2 because :meth:`training_step` will operate on only part of the batch. However, this is still optional and only needed for things like softmax or NCE loss. Note: If you later switch to ddp or some other mode, this will still be called so that you don't have to change your code .. 
code-block:: python # pseudocode sub_batches = split_batches_for_dp(batch) batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches] training_step_end(batch_parts_outputs) Args: batch_parts_outputs: What you return in `training_step` for each batch part. Return: Anything When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step: .. code-block:: python def training_step(self, batch, batch_idx): # batch is 1/num_gpus big x, y = batch out = self(x) # softmax uses only a portion of the batch in the denominator loss = self.softmax(out) loss = nce_loss(loss) return loss If you wish to do something with all the parts of the batch, then use this method to do it: .. code-block:: python def training_step(self, batch, batch_idx): # batch is 1/num_gpus big x, y = batch out = self.encoder(x) return {'pred': out} def training_step_end(self, training_step_outputs): gpu_0_pred = training_step_outputs[0]['pred'] gpu_1_pred = training_step_outputs[1]['pred'] gpu_n_pred = training_step_outputs[n]['pred'] # this softmax now uses the full batch loss = nce_loss([gpu_0_pred, gpu_1_pred, gpu_n_pred]) return loss See Also: See the :ref:`advanced/multi_gpu:Multi-GPU training` guide for more details. """ def training_epoch_end(self, outputs: EPOCH_OUTPUT) -> None: """ Called at the end of the training epoch with the outputs of all training steps. Use this in case you need to do something with all the outputs for every training_step. .. code-block:: python # the pseudocode for these calls train_outs = [] for train_batch in train_data: out = training_step(train_batch) train_outs.append(out) training_epoch_end(train_outs) Args: outputs: List of outputs you defined in :meth:`training_step`, or if there are multiple dataloaders, a list containing a list of outputs for each dataloader. Return: None Note: If this method is not overridden, this won't be called. Example:: def training_epoch_end(self, training_step_outputs): # do something with all training_step outputs return result With multiple dataloaders, ``outputs`` will be a list of lists. The outer list contains one entry per dataloader, while the inner list contains the individual outputs of each training step for that dataloader. .. code-block:: python def training_epoch_end(self, training_step_outputs): for out in training_step_outputs: # do something here """ def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: r""" Operates on a single batch of data from the validation set. In this step you might generate examples or calculate anything of interest like accuracy. .. code-block:: python # the pseudocode for these calls val_outs = [] for val_batch in val_data: out = validation_step(val_batch) val_outs.append(out) validation_epoch_end(val_outs) Args: batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]): The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list. batch_idx (int): The index of this batch dataloader_idx (int): The index of the dataloader that produced this batch (only if multiple val dataloaders used) Return: Any of. - Any object or value - ``None`` - Validation will skip to the next batch .. code-block:: python # pseudocode of order val_outs = [] for val_batch in val_data: out = validation_step(val_batch) if defined('validation_step_end'): out = validation_step_end(out) val_outs.append(out) val_outs = validation_epoch_end(val_outs) ..
code-block:: python # if you have one val dataloader: def validation_step(self, batch, batch_idx) # if you have multiple val dataloaders: def validation_step(self, batch, batch_idx, dataloader_idx) Examples:: # CASE 1: A single validation dataset def validation_step(self, batch, batch_idx): x, y = batch # implement your own out = self(x) loss = self.loss(out, y) # log 6 example images # or generated text... or whatever sample_imgs = x[:6] grid = torchvision.utils.make_grid(sample_imgs) self.logger.experiment.add_image('example_images', grid, 0) # calculate acc labels_hat = torch.argmax(out, dim=1) val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0) # log the outputs! self.log_dict({'val_loss': loss, 'val_acc': val_acc}) If you pass in multiple val dataloaders, :meth:`validation_step` will have an additional argument. .. code-block:: python # CASE 2: multiple validation dataloaders def validation_step(self, batch, batch_idx, dataloader_idx): # dataloader_idx tells you which dataset this is. Note: If you don't need to validate you don't need to implement this method. Note: When the :meth:`validation_step` is called, the model has been put in eval mode and PyTorch gradients have been disabled. At the end of validation, the model goes back to training mode and gradients are enabled. """ def validation_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: """ Use this when validating with dp or ddp2 because :meth:`validation_step` will operate on only part of the batch. However, this is still optional and only needed for things like softmax or NCE loss. Note: If you later switch to ddp or some other mode, this will still be called so that you don't have to change your code. .. code-block:: python # pseudocode sub_batches = split_batches_for_dp(batch) batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches] validation_step_end(batch_parts_outputs) Args: batch_parts_outputs: What you return in :meth:`validation_step` for each batch part. Return: None or anything .. code-block:: python # WITHOUT validation_step_end # if used in DP or DDP2, this batch is 1/num_gpus large def validation_step(self, batch, batch_idx): # batch is 1/num_gpus big x, y = batch out = self.encoder(x) loss = self.softmax(out) loss = nce_loss(loss) self.log('val_loss', loss) # -------------- # with validation_step_end to do softmax over the full batch def validation_step(self, batch, batch_idx): # batch is 1/num_gpus big x, y = batch out = self(x) return out def validation_step_end(self, val_step_outputs): for out in val_step_outputs: # do something with these See Also: See the :ref:`advanced/multi_gpu:Multi-GPU training` guide for more details. """ def validation_epoch_end(self, outputs: EPOCH_OUTPUT) -> None: """ Called at the end of the validation epoch with the outputs of all validation steps. .. code-block:: python # the pseudocode for these calls val_outs = [] for val_batch in val_data: out = validation_step(val_batch) val_outs.append(out) validation_epoch_end(val_outs) Args: outputs: List of outputs you defined in :meth:`validation_step`, or if there are multiple dataloaders, a list containing a list of outputs for each dataloader. Return: None Note: If you didn't define a :meth:`validation_step`, this won't be called. Examples: With a single dataloader: .. code-block:: python def validation_epoch_end(self, val_step_outputs): for out in val_step_outputs: # do something With multiple dataloaders, `outputs` will be a list of lists. 
The outer list contains one entry per dataloader, while the inner list contains the individual outputs of each validation step for that dataloader. .. code-block:: python def validation_epoch_end(self, outputs): for dataloader_output_result in outputs: dataloader_outs = dataloader_output_result.dataloader_i_outputs self.log('final_metric', final_value) """ def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: r""" Operates on a single batch of data from the test set. In this step you'd normally generate examples or calculate anything of interest such as accuracy. .. code-block:: python # the pseudocode for these calls test_outs = [] for test_batch in test_data: out = test_step(test_batch) test_outs.append(out) test_epoch_end(test_outs) Args: batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]): The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list. batch_idx (int): The index of this batch. dataloader_idx (int): The index of the dataloader that produced this batch (only if multiple test dataloaders used). Return: Any of. - Any object or value - ``None`` - Testing will skip to the next batch .. code-block:: python # if you have one test dataloader: def test_step(self, batch, batch_idx) # if you have multiple test dataloaders: def test_step(self, batch, batch_idx, dataloader_idx) Examples:: # CASE 1: A single test dataset def test_step(self, batch, batch_idx): x, y = batch # implement your own out = self(x) loss = self.loss(out, y) # log 6 example images # or generated text... or whatever sample_imgs = x[:6] grid = torchvision.utils.make_grid(sample_imgs) self.logger.experiment.add_image('example_images', grid, 0) # calculate acc labels_hat = torch.argmax(out, dim=1) test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0) # log the outputs! self.log_dict({'test_loss': loss, 'test_acc': test_acc}) If you pass in multiple test dataloaders, :meth:`test_step` will have an additional argument. .. code-block:: python # CASE 2: multiple test dataloaders def test_step(self, batch, batch_idx, dataloader_idx): # dataloader_idx tells you which dataset this is. Note: If you don't need to test you don't need to implement this method. Note: When the :meth:`test_step` is called, the model has been put in eval mode and PyTorch gradients have been disabled. At the end of the test epoch, the model goes back to training mode and gradients are enabled. """ def test_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: """ Use this when testing with dp or ddp2 because :meth:`test_step` will operate on only part of the batch. However, this is still optional and only needed for things like softmax or NCE loss. Note: If you later switch to ddp or some other mode, this will still be called so that you don't have to change your code. .. code-block:: python # pseudocode sub_batches = split_batches_for_dp(batch) batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches] test_step_end(batch_parts_outputs) Args: batch_parts_outputs: What you return in :meth:`test_step` for each batch part. Return: None or anything .. 
code-block:: python # WITHOUT test_step_end # if used in DP or DDP2, this batch is 1/num_gpus large def test_step(self, batch, batch_idx): # batch is 1/num_gpus big x, y = batch out = self(x) loss = self.softmax(out) self.log('test_loss', loss) # -------------- # with test_step_end to do softmax over the full batch def test_step(self, batch, batch_idx): # batch is 1/num_gpus big x, y = batch out = self.encoder(x) return out def test_step_end(self, output_results): # this out is now the full size of the batch all_test_step_outs = output_results.out loss = nce_loss(all_test_step_outs) self.log('test_loss', loss) See Also: See the :ref:`advanced/multi_gpu:Multi-GPU training` guide for more details. """ def test_epoch_end(self, outputs: EPOCH_OUTPUT) -> None: """ Called at the end of a test epoch with the output of all test steps. .. code-block:: python # the pseudocode for these calls test_outs = [] for test_batch in test_data: out = test_step(test_batch) test_outs.append(out) test_epoch_end(test_outs) Args: outputs: List of outputs you defined in :meth:`test_step_end`, or if there are multiple dataloaders, a list containing a list of outputs for each dataloader Return: None Note: If you didn't define a :meth:`test_step`, this won't be called. Examples: With a single dataloader: .. code-block:: python def test_epoch_end(self, outputs): # do something with the outputs of all test batches all_test_preds = test_step_outputs.predictions some_result = calc_all_results(all_test_preds) self.log(some_result) With multiple dataloaders, `outputs` will be a list of lists. The outer list contains one entry per dataloader, while the inner list contains the individual outputs of each test step for that dataloader. .. code-block:: python def test_epoch_end(self, outputs): final_value = 0 for dataloader_outputs in outputs: for test_step_out in dataloader_outputs: # do something final_value += test_step_out self.log('final_metric', final_value) """ def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any: """ Step function called during :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`. By default, it calls :meth:`~pytorch_lightning.core.lightning.LightningModule.forward`. Override to add any processing logic. Args: batch: Current batch batch_idx: Index of current batch dataloader_idx: Index of the current dataloader Return: Predicted output """ return self(batch) def configure_callbacks(self): """ Configure model-specific callbacks. When the model gets attached, e.g., when ``.fit()`` or ``.test()`` gets called, the list returned here will be merged with the list of callbacks passed to the Trainer's ``callbacks`` argument. If a callback returned here has the same type as one or several callbacks already present in the Trainer's callbacks list, it will take priority and replace them. In addition, Lightning will make sure :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callbacks run last. Return: A list of callbacks which will extend the list of callbacks in the Trainer. Example:: def configure_callbacks(self): early_stop = EarlyStopping(monitor="val_acc", mode="max") checkpoint = ModelCheckpoint(monitor="val_loss") return [early_stop, checkpoint] Note: Certain callback methods like :meth:`~pytorch_lightning.callbacks.base.Callback.on_init_start` will never be invoked on the new callbacks returned here. """ return [] def configure_optimizers(self): r""" Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple. Return: Any of these 6 options. - **Single optimizer**. - **List or Tuple** of optimizers. - **Two lists** - The first list has multiple optimizers, and the second has multiple LR schedulers (or multiple lr_dict). - **Dictionary**, with an ``"optimizer"`` key, and (optionally) a ``"lr_scheduler"`` key whose value is a single LR scheduler or lr_dict. - **Tuple of dictionaries** as described above, with an optional ``"frequency"`` key. - **None** - Fit will run without any optimizer. Note: The ``frequency`` value specified in a dict along with the ``optimizer`` key is an int corresponding to the number of sequential batches optimized with the specific optimizer. It should be given to none or to all of the optimizers. There is a difference between passing multiple optimizers in a list, and passing multiple optimizers in dictionaries with a frequency of 1: In the former case, all optimizers will operate on the given batch in each optimization step. In the latter, only one optimizer will operate on the given batch at every step. This is different from the ``frequency`` value specified in the lr_dict mentioned below. .. code-block:: python def configure_optimizers(self): optimizer_one = torch.optim.SGD(self.model.parameters(), lr=0.01) optimizer_two = torch.optim.SGD(self.model.parameters(), lr=0.01) return [ {'optimizer': optimizer_one, 'frequency': 5}, {'optimizer': optimizer_two, 'frequency': 10}, ] In this example, the first optimizer will be used for the first 5 steps, the second optimizer for the next 10 steps and that cycle will continue. If an LR scheduler is specified for an optimizer using the ``lr_scheduler`` key in the above dict, the scheduler will only be updated when its optimizer is being used. Note: The lr_dict is a dictionary which contains the scheduler and its associated configuration. The default configuration is shown below. .. code-block:: python lr_dict = { 'scheduler': lr_scheduler, # The LR scheduler instance (required) 'interval': 'epoch', # The unit of the scheduler's step size 'frequency': 1, # The frequency of the scheduler 'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler 'monitor': 'val_loss', # Metric for ReduceLROnPlateau to monitor 'strict': True, # Whether to crash the training if `monitor` is not found 'name': None, # Custom name for LearningRateMonitor to use } Only the ``"scheduler"`` key is required, the rest will be set to the defaults above. Note: The ``"frequency"`` value is an ``int`` corresponding to the number of sequential batches optimized with the specific optimizer. It should be given to none or to all of the optimizers. There is a difference between passing multiple optimizers in a list and passing multiple optimizers in dictionaries with a frequency of 1: In the former case, all optimizers will operate on the given batch in each optimization step. In the latter, only one optimizer will operate on the given batch at every step. 
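For illustration, a minimal sketch of the single-dictionary format described above (this assumes a ``val_loss`` metric is logged so ``ReduceLROnPlateau`` has something to monitor):

        .. code-block:: python

            def configure_optimizers(self):
                optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
                scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
                return {
                    'optimizer': optimizer,
                    'lr_scheduler': {'scheduler': scheduler, 'monitor': 'val_loss'},
                }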
Examples:: # most cases def configure_optimizers(self): return Adam(self.parameters(), lr=1e-3) # multiple optimizer case (e.g.: GAN) def configure_optimizers(self): gen_opt = Adam(self.model_gen.parameters(), lr=0.01) dis_opt = Adam(self.model_dis.parameters(), lr=0.02) return gen_opt, dis_opt # example with learning rate schedulers def configure_optimizers(self): gen_opt = Adam(self.model_gen.parameters(), lr=0.01) dis_opt = Adam(self.model_dis.parameters(), lr=0.02) dis_sch = CosineAnnealing(dis_opt, T_max=10) return [gen_opt, dis_opt], [dis_sch] # example with step-based learning rate schedulers def configure_optimizers(self): gen_opt = Adam(self.model_gen.parameters(), lr=0.01) dis_opt = Adam(self.model_dis.parameters(), lr=0.02) gen_sch = {'scheduler': ExponentialLR(gen_opt, 0.99), 'interval': 'step'} # called after each training step dis_sch = CosineAnnealing(dis_opt, T_max=10) # called every epoch return [gen_opt, dis_opt], [gen_sch, dis_sch] # example with optimizer frequencies # see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1 # https://arxiv.org/abs/1704.00028 def configure_optimizers(self): gen_opt = Adam(self.model_gen.parameters(), lr=0.01) dis_opt = Adam(self.model_dis.parameters(), lr=0.02) n_critic = 5 return ( {'optimizer': dis_opt, 'frequency': n_critic}, {'optimizer': gen_opt, 'frequency': 1} ) Note: Some things to know: - Lightning calls ``.backward()`` and ``.step()`` on each optimizer and learning rate scheduler as needed. - If you use 16-bit precision (``precision=16``), Lightning will automatically handle the optimizers. - If you use multiple optimizers, :meth:`training_step` will have an additional ``optimizer_idx`` parameter. - If you use :class:`torch.optim.LBFGS`, Lightning handles the closure function automatically for you. - If you use multiple optimizers, gradients will be calculated only for the parameters of current optimizer at each training step. - If you need to control how often those optimizers step or override the default ``.step()`` schedule, override the :meth:`optimizer_step` hook. - If you only want to call a learning rate scheduler every ``x`` step or epoch, or want to monitor a custom metric, you can specify these in a lr_dict: .. code-block:: python lr_dict = { 'scheduler': lr_scheduler, 'interval': 'step', # or 'epoch' 'monitor': 'val_f1', 'frequency': x, } """ rank_zero_warn("`configure_optimizers` must be implemented to be used with the Lightning Trainer") def manual_backward(self, loss: Tensor, optimizer: Optional[Optimizer] = None, *args, **kwargs) -> None: """ Call this directly from your training_step when doing optimizations manually. By using this we can ensure that all the proper scaling when using 16-bit etc has been done for you. This function forwards all args to the .backward() call as well. See :ref:`manual optimization<common/optimizers:Manual optimization>` for more examples. Example:: def training_step(...): opt = self.optimizers() loss = ... opt.zero_grad() # automatically applies scaling, etc... 
self.manual_backward(loss) opt.step() """ if optimizer is not None: rank_zero_deprecation( "`optimizer` argument to `manual_backward` is deprecated in v1.2 and will be removed in v1.4" ) # make sure we're using manual opt self._verify_is_manual_optimization('manual_backward') # backward self._running_manual_backward = True self.trainer.train_loop.backward(loss, optimizer=None, opt_idx=None, *args, **kwargs) self._running_manual_backward = False def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None: """ Override backward with your own implementation if you need to. Args: loss: Loss is already scaled by accumulated grads optimizer: Current optimizer being used optimizer_idx: Index of the current optimizer being used Called to perform backward step. Feel free to override as needed. The loss passed in has already been scaled for accumulated gradients if requested. Example:: def backward(self, loss, optimizer, optimizer_idx): loss.backward() """ if self.automatic_optimization or self._running_manual_backward: loss.backward(*args, **kwargs) def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int): """ Makes sure only the gradients of the current optimizer's parameters are calculated in the training step to prevent dangling gradients in a multiple-optimizer setup. .. note:: Only called when using multiple optimizers Override for your own behavior It works with ``untoggle_optimizer`` to make sure param_requires_grad_state is properly reset. Args: optimizer: Current optimizer used in training_loop optimizer_idx: Current optimizer idx in training_loop """ # Iterate over all optimizer parameters to preserve their `requires_grad` information # in case these are pre-defined during `configure_optimizers` param_requires_grad_state = {} for opt in self.optimizers(use_pl_optimizer=False): for group in opt.param_groups: for param in group['params']: # If a param already appears in param_requires_grad_state, continue if param in param_requires_grad_state: continue param_requires_grad_state[param] = param.requires_grad param.requires_grad = False # Then iterate over the current optimizer's parameters and set its `requires_grad` # properties accordingly for group in optimizer.param_groups: for param in group['params']: param.requires_grad = param_requires_grad_state[param] self._param_requires_grad_state = param_requires_grad_state def untoggle_optimizer(self, optimizer_idx: int): """ .. note:: Only called when using multiple optimizers Override for your own behavior Args: optimizer_idx: Current optimizer idx in training_loop """ for opt_idx, opt in enumerate(self.optimizers(use_pl_optimizer=False)): if optimizer_idx != opt_idx: for group in opt.param_groups: for param in group['params']: if param in self._param_requires_grad_state: param.requires_grad = self._param_requires_grad_state[param] # save memory self._param_requires_grad_state = dict() def optimizer_step( self, epoch: int = None, batch_idx: int = None, optimizer: Optimizer = None, optimizer_idx: int = None, optimizer_closure: Optional[Callable] = None, on_tpu: bool = None, using_native_amp: bool = None, using_lbfgs: bool = None, ) -> None: r""" Override this method to adjust the default way the :class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer. By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example once per optimizer.
Warning: If you are overriding this method, make sure that you pass the ``optimizer_closure`` parameter to ``optimizer.step()`` function as shown in the examples. This ensures that ``training_step()``, ``optimizer.zero_grad()``, ``backward()`` are called within :meth:`~pytorch_lightning.trainer.training_loop.TrainLoop.run_training_batch`. Args: epoch: Current epoch batch_idx: Index of current batch optimizer: A PyTorch optimizer optimizer_idx: If you used multiple optimizers, this indexes into that list. optimizer_closure: Closure for all optimizers on_tpu: ``True`` if TPU backward is required using_native_amp: ``True`` if using native amp using_lbfgs: True if the matching optimizer is :class:`torch.optim.LBFGS` Examples:: # DEFAULT def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs): optimizer.step(closure=optimizer_closure) # Alternating schedule for optimizer steps (i.e.: GANs) def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs): # update generator opt every step if optimizer_idx == 0: optimizer.step(closure=optimizer_closure) # update discriminator opt every 2 steps if optimizer_idx == 1: if (batch_idx + 1) % 2 == 0 : optimizer.step(closure=optimizer_closure) # ... # add as many optimizers as you want Here's another example showing how to use this for more advanced things such as learning rate warm-up: .. code-block:: python # learning rate warm-up def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs): # warm up lr if self.trainer.global_step < 500: lr_scale = min(1., float(self.trainer.global_step + 1) / 500.) for pg in optimizer.param_groups: pg['lr'] = lr_scale * self.learning_rate # update params optimizer.step(closure=optimizer_closure) """ optimizer.step(closure=optimizer_closure) def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int): """Override this method to change the default behaviour of ``optimizer.zero_grad()``. Args: epoch: Current epoch batch_idx: Index of current batch optimizer: A PyTorch optimizer optimizer_idx: If you used multiple optimizers this indexes into that list. Examples:: # DEFAULT def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx): optimizer.zero_grad() # Set gradients to `None` instead of zero to improve performance. def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx): optimizer.zero_grad(set_to_none=True) See :meth:`torch.optim.Optimizer.zero_grad` for the explanation of the above example. """ optimizer.zero_grad() def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list: r""" When using truncated backpropagation through time, each batch must be split along the time dimension. Lightning handles this by default, but for custom behavior override this function. Args: batch: Current batch split_size: The size of the split Return: List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated back propagation through time. The default implementation splits root level Tensors and Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length. 
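For intuition, a sketch of what the default splitting produces (the shapes here are illustrative only):

        .. code-block:: python

            # batch = [x] with x of shape (batch_size, time=100, features)
            # tbptt_split_batch(batch, split_size=20) returns 5 splits, each a
            # list holding a tensor of shape (batch_size, 20, features)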
Examples:: def tbptt_split_batch(self, batch, split_size): splits = [] for t in range(0, time_dims[0], split_size): batch_split = [] for i, x in enumerate(batch): if isinstance(x, torch.Tensor): split_x = x[:, t:t + split_size] elif isinstance(x, collections.Sequence): split_x = [None] * len(x) for batch_idx in range(len(x)): split_x[batch_idx] = x[batch_idx][t:t + split_size] batch_split.append(split_x) splits.append(batch_split) return splits Note: Called in the training loop after :meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start` if :paramref:`~pytorch_lightning.core.lightning.LightningModule.truncated_bptt_steps` > 0. Each returned batch split is passed separately to :meth:`training_step`. """ time_dims = [len(x[0]) for x in batch if isinstance(x, (torch.Tensor, collections.Sequence))] assert len(time_dims) >= 1, "Unable to determine batch time dimension" assert all(x == time_dims[0] for x in time_dims), "Batch time dimension length is ambiguous" splits = [] for t in range(0, time_dims[0], split_size): batch_split = [] for i, x in enumerate(batch): if isinstance(x, torch.Tensor): split_x = x[:, t:t + split_size] elif isinstance(x, collections.Sequence): split_x = [None] * len(x) for batch_idx in range(len(x)): split_x[batch_idx] = x[batch_idx][t:t + split_size] batch_split.append(split_x) splits.append(batch_split) return splits def summarize(self, mode: Optional[str] = ModelSummary.MODE_DEFAULT) -> Optional[ModelSummary]: model_summary = None if mode in ModelSummary.MODES: model_summary = ModelSummary(self, mode=mode) log.info("\n" + str(model_summary)) elif mode is not None: raise MisconfigurationException(f"`mode` can be None, {', '.join(ModelSummary.MODES)}, got {mode}") return model_summary def freeze(self) -> None: r""" Freeze all params for inference. Example:: model = MyLightningModule(...) model.freeze() """ for param in self.parameters(): param.requires_grad = False self.eval() def unfreeze(self) -> None: """ Unfreeze all parameters for training. .. code-block:: python model = MyLightningModule(...) model.unfreeze() """ for param in self.parameters(): param.requires_grad = True self.train() def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]: r""" Implement this to override the default items displayed in the progress bar. By default it includes the average loss value, split index of BPTT (if used) and the version of the experiment when using a logger. .. code-block:: Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10] Here is an example of how to override the defaults: .. code-block:: python def get_progress_bar_dict(self): # don't show the version number items = super().get_progress_bar_dict() items.pop("v_num", None) return items Return: Dictionary with the items to be displayed in the progress bar.
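For illustration, adding a custom item (``self.learning_rate`` is assumed to be an attribute defined on your own module, not a Lightning built-in):

        .. code-block:: python

            def get_progress_bar_dict(self):
                items = super().get_progress_bar_dict()
                items["lr"] = f"{self.learning_rate:.2e}"
                return items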
""" # call .item() only once but store elements without graphs running_train_loss = self.trainer.train_loop.running_loss.mean() avg_training_loss = None if running_train_loss is not None: avg_training_loss = running_train_loss.cpu().item() elif self.automatic_optimization: avg_training_loss = float('NaN') tqdm_dict = {} if avg_training_loss is not None: tqdm_dict["loss"] = f"{avg_training_loss:.3g}" module_tbptt_enabled = self.truncated_bptt_steps > 0 trainer_tbptt_enabled = self.trainer.truncated_bptt_steps is not None and self.trainer.truncated_bptt_steps > 0 if module_tbptt_enabled or trainer_tbptt_enabled: tqdm_dict["split_idx"] = self.trainer.split_idx if self.trainer.logger is not None and self.trainer.logger.version is not None: version = self.trainer.logger.version # show last 4 places of long version strings version = version[-4:] if isinstance(version, str) else version tqdm_dict["v_num"] = version return tqdm_dict def _verify_is_manual_optimization(self, fn_name): if self.automatic_optimization: raise MisconfigurationException( f'to use {fn_name}, please disable automatic optimization:' ' set model property `automatic_optimization` as False' ) @classmethod def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]: """ Collect all module arguments in the current constructor and all child constructors. The child constructors are all the ``__init__`` methods that reach the current class through (chained) ``super().__init__()`` calls. Args: frame: instance frame Returns: self_arguments: arguments dictionary of the first instance parents_arguments: arguments dictionary of the parent's instances """ if not frame: frame = inspect.currentframe() frame_args = collect_init_args(frame.f_back, []) self_arguments = frame_args[-1] # set hyper_parameters in child self_arguments = self_arguments parents_arguments = {} # add all arguments from parents for args in frame_args[:-1]: parents_arguments.update(args) return self_arguments, parents_arguments def save_hyperparameters( self, *args, ignore: Optional[Union[Sequence[str], str]] = None, frame: Optional[types.FrameType] = None ) -> None: """Save model arguments to ``hparams`` attribute. Args: args: single object of `dict`, `NameSpace` or `OmegaConf` or string names or arguments from class ``__init__`` ignore: an argument name or a list of argument names from class ``__init__`` to be ignored frame: a frame object. Default is None Example:: >>> class ManuallyArgsModel(LightningModule): ... def __init__(self, arg1, arg2, arg3): ... super().__init__() ... # manually assign arguments ... self.save_hyperparameters('arg1', 'arg3') ... def forward(self, *args, **kwargs): ... ... >>> model = ManuallyArgsModel(1, 'abc', 3.14) >>> model.hparams "arg1": 1 "arg3": 3.14 >>> class AutomaticArgsModel(LightningModule): ... def __init__(self, arg1, arg2, arg3): ... super().__init__() ... # equivalent automatic ... self.save_hyperparameters() ... def forward(self, *args, **kwargs): ... ... >>> model = AutomaticArgsModel(1, 'abc', 3.14) >>> model.hparams "arg1": 1 "arg2": abc "arg3": 3.14 >>> class SingleArgModel(LightningModule): ... def __init__(self, params): ... super().__init__() ... # manually assign single argument ... self.save_hyperparameters(params) ... def forward(self, *args, **kwargs): ... ... >>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14)) >>> model.hparams "p1": 1 "p2": abc "p3": 3.14 >>> class ManuallyArgsModel(LightningModule): ... def __init__(self, arg1, arg2, arg3): ... super().__init__() ... 
# pass argument(s) to ignore as a string or in a list ... self.save_hyperparameters(ignore='arg2') ... def forward(self, *args, **kwargs): ... ... >>> model = ManuallyArgsModel(1, 'abc', 3.14) >>> model.hparams "arg1": 1 "arg3": 3.14 """ # the frame needs to be created in this file. if not frame: frame = inspect.currentframe().f_back save_hyperparameters(self, *args, ignore=ignore, frame=frame) def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None: if isinstance(hp, Namespace): hp = vars(hp) if isinstance(hp, dict): hp = AttributeDict(hp) elif isinstance(hp, PRIMITIVE_TYPES): raise ValueError(f"Primitives {PRIMITIVE_TYPES} are not allowed.") elif not isinstance(hp, ALLOWED_CONFIG_TYPES): raise ValueError(f"Unsupported config type of {type(hp)}.") if isinstance(hp, dict) and isinstance(self.hparams, dict): self.hparams.update(hp) else: self._hparams = hp @torch.no_grad() def to_onnx( self, file_path: Union[str, Path], input_sample: Optional[Any] = None, **kwargs, ): """ Saves the model in ONNX format Args: file_path: The path of the file the onnx model should be saved to. input_sample: An input for tracing. Default: None (Use self.example_input_array) **kwargs: Will be passed to torch.onnx.export function. Example: >>> class SimpleModel(LightningModule): ... def __init__(self): ... super().__init__() ... self.l1 = torch.nn.Linear(in_features=64, out_features=4) ... ... def forward(self, x): ... return torch.relu(self.l1(x.view(x.size(0), -1))) >>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile: ... model = SimpleModel() ... input_sample = torch.randn((1, 64)) ... model.to_onnx(tmpfile.name, input_sample, export_params=True) ... os.path.isfile(tmpfile.name) True """ mode = self.training if input_sample is None: if self.example_input_array is None: raise ValueError( "Could not export to ONNX since neither `input_sample` nor" " `model.example_input_array` attribute is set." ) input_sample = self.example_input_array input_sample = self._apply_batch_transfer_handler(input_sample) if "example_outputs" not in kwargs: self.eval() kwargs["example_outputs"] = self(input_sample) torch.onnx.export(self, input_sample, file_path, **kwargs) self.train(mode) @torch.no_grad() def to_torchscript( self, file_path: Optional[Union[str, Path]] = None, method: Optional[str] = 'script', example_inputs: Optional[Any] = None, **kwargs, ) -> Union[ScriptModule, Dict[str, ScriptModule]]: """ By default compiles the whole model to a :class:`~torch.jit.ScriptModule`. If you want to use tracing, please provided the argument `method='trace'` and make sure that either the example_inputs argument is provided, or the model has self.example_input_array set. If you would like to customize the modules that are scripted you should override this method. In case you want to return multiple modules, we recommend using a dictionary. Args: file_path: Path where to save the torchscript. Default: None (no file saved). method: Whether to use TorchScript's script or trace method. Default: 'script' example_inputs: An input to be used to do tracing when method is set to 'trace'. Default: None (Use self.example_input_array) **kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` or :func:`torch.jit.trace` function. Note: - Requires the implementation of the :meth:`~pytorch_lightning.core.lightning.LightningModule.forward` method. - The exported script will be set to evaluation mode. 
- It is recommended that you install the latest supported version of PyTorch to use this feature without limitations. See also the :mod:`torch.jit` documentation for supported features. Example: >>> class SimpleModel(LightningModule): ... def __init__(self): ... super().__init__() ... self.l1 = torch.nn.Linear(in_features=64, out_features=4) ... ... def forward(self, x): ... return torch.relu(self.l1(x.view(x.size(0), -1))) ... >>> model = SimpleModel() >>> torch.jit.save(model.to_torchscript(), "model.pt") # doctest: +SKIP >>> os.path.isfile("model.pt") # doctest: +SKIP >>> torch.jit.save(model.to_torchscript(file_path="model_trace.pt", method='trace', # doctest: +SKIP ... example_inputs=torch.randn(1, 64))) # doctest: +SKIP >>> os.path.isfile("model_trace.pt") # doctest: +SKIP True Return: This LightningModule as a torchscript, regardless of whether file_path is defined or not. """ mode = self.training if method == 'script': torchscript_module = torch.jit.script(self.eval(), **kwargs) elif method == 'trace': # if no example inputs are provided, try to see if model has example_input_array set if example_inputs is None: if self.example_input_array is None: raise ValueError( 'Choosing method=`trace` requires either `example_inputs`' ' or `model.example_input_array` to be defined.' ) example_inputs = self.example_input_array # automatically send example inputs to the right device and use trace example_inputs = self._apply_batch_transfer_handler(example_inputs) torchscript_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs) else: raise ValueError(f"The 'method' parameter only supports 'script' or 'trace', but value given was: {method}") self.train(mode) if file_path is not None: torch.jit.save(torchscript_module, file_path) return torchscript_module @property def hparams(self) -> Union[AttributeDict, dict, Namespace]: if not hasattr(self, "_hparams"): self._hparams = AttributeDict() return self._hparams @property def hparams_initial(self) -> AttributeDict: if not hasattr(self, "_hparams_initial"): return AttributeDict() # prevent any change return copy.deepcopy(self._hparams_initial) @property def model_size(self) -> float: # todo: think about better way without need to dump model to drive tmp_name = f"{uuid.uuid4().hex}.pt" torch.save(self.state_dict(), tmp_name) size_mb = os.path.getsize(tmp_name) / 1e6 os.remove(tmp_name) return size_mb
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """nn.Module with additional great features.""" import collections import copy import inspect import logging import os import tempfile import types import uuid from abc import ABC from argparse import Namespace from functools import partial from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import torch from torch import ScriptModule, Tensor from torch.nn import Module from torch.optim.optimizer import Optimizer from pytorch_lightning.core.grads import GradInformation from pytorch_lightning.core.hooks import CheckpointHooks, DataHooks, ModelHooks from pytorch_lightning.core.memory import ModelSummary from pytorch_lightning.core.optimizer import LightningOptimizer from pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, ModelIO, PRIMITIVE_TYPES from pytorch_lightning.core.step_result import Result from pytorch_lightning.utilities import rank_zero_deprecation, rank_zero_warn from pytorch_lightning.utilities.apply_func import apply_to_collection, convert_to_tensors from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, save_hyperparameters from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT log = logging.getLogger(__name__) class LightningModule( ABC, DeviceDtypeModuleMixin, GradInformation, ModelIO, ModelHooks, DataHooks, CheckpointHooks, Module, ): # Below is for property support of JIT in PyTorch 1.7 # since none of these are important when using JIT, we are going to ignore them. 
__jit_unused_properties__ = [ "datamodule", "example_input_array", "hparams", "hparams_initial", "on_gpu", "current_epoch", "global_step", "global_rank", "local_rank", "logger", "model_size", "automatic_optimization", "truncated_bptt_steps", ] + DeviceDtypeModuleMixin.__jit_unused_properties__ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) # see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/ # torch/nn/modules/module.py#L227) torch._C._log_api_usage_once(f"lightning.module.{self.__class__.__name__}") self.loaded_optimizer_states_dict = {} #: Pointer to the trainer object self.trainer = None self._distrib_type = None self._device_type = None #: True if using amp self.use_amp: bool = False #: The precision used self.precision: int = 32 # optionally can be set by user self._example_input_array = None self._datamodule = None self._results: Optional[Result] = None self._current_fx_name: str = '' self._running_manual_backward: bool = False self._current_hook_fx_name: Optional[str] = None self._current_dataloader_idx: Optional[int] = None self._automatic_optimization: bool = True self._truncated_bptt_steps: int = 0 self._param_requires_grad_state = dict() def optimizers(self, use_pl_optimizer: bool = True) -> Union[Optimizer, List[Optimizer], List[LightningOptimizer]]: if use_pl_optimizer: opts = list(self.trainer.lightning_optimizers.values()) else: opts = self.trainer.optimizers # single optimizer if isinstance(opts, list) and len(opts) == 1 and isinstance(opts[0], Optimizer): return opts[0] # multiple opts return opts def lr_schedulers(self) -> Optional[Union[Any, List[Any]]]: if not self.trainer.lr_schedulers: return None # ignore other keys "interval", "frequency", etc. lr_schedulers = [s["scheduler"] for s in self.trainer.lr_schedulers] # single scheduler if len(lr_schedulers) == 1: return lr_schedulers[0] # multiple schedulers return lr_schedulers @property def example_input_array(self) -> Any: return self._example_input_array @property def current_epoch(self) -> int: """The current epoch""" return self.trainer.current_epoch if self.trainer else 0 @property def global_step(self) -> int: """Total training batches seen across all epochs""" return self.trainer.global_step if self.trainer else 0 @property def global_rank(self) -> int: """ The index of the current process across all nodes and devices. """ return self.trainer.global_rank if self.trainer else 0 @property def local_rank(self) -> int: """ The index of the current process within a single node. """ return self.trainer.local_rank if self.trainer else 0 @example_input_array.setter def example_input_array(self, example: Any) -> None: self._example_input_array = example @property def datamodule(self) -> Any: rank_zero_deprecation( "The `LightningModule.datamodule` property is deprecated in v1.3 and will be removed in v1.5." " Access the datamodule through using `self.trainer.datamodule` instead." ) return self._datamodule @datamodule.setter def datamodule(self, datamodule: Any) -> None: self._datamodule = datamodule @property def on_gpu(self): """ True if your model is currently running on GPUs. Useful to set flags around the LightningModule for different CPU vs GPU behavior. """ return self.device.type == "cuda" @property def automatic_optimization(self) -> bool: """ If False you are responsible for calling .backward, .step, zero_grad. 
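        When this is ``False``, a typical training step drives the optimizers by hand, roughly
        as sketched below (``compute_loss`` is a hypothetical helper standing in for your own
        loss computation):

        .. code-block:: python

            def training_step(self, batch, batch_idx):
                opt = self.optimizers()
                loss = self.compute_loss(batch)  # hypothetical loss helper
                opt.zero_grad()
                self.manual_backward(loss)
                opt.step()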
""" return self._automatic_optimization @automatic_optimization.setter def automatic_optimization(self, automatic_optimization: bool) -> None: self._automatic_optimization = automatic_optimization @property def truncated_bptt_steps(self) -> int: """ truncated_bptt_steps: Truncated back prop breaks performs backprop every k steps of much a longer sequence. If this is > 0, the training step is passed ``hiddens``. """ return self._truncated_bptt_steps @truncated_bptt_steps.setter def truncated_bptt_steps(self, truncated_bptt_steps: int) -> None: self._truncated_bptt_steps = truncated_bptt_steps @property def logger(self): """ Reference to the logger object in the Trainer. """ return self.trainer.logger if self.trainer else None def _apply_batch_transfer_handler(self, batch: Any, device: Optional[torch.device] = None, dataloader_idx: int = 0): batch = self.on_before_batch_transfer(batch, dataloader_idx) batch = self.transfer_batch_to_device(batch, device) batch = self.on_after_batch_transfer(batch, dataloader_idx) return batch def print(self, *args, **kwargs) -> None: r""" Prints only from process 0. Use this in any distributed mode to log only once. Args: *args: The thing to print. The same as for Python's built-in print function. **kwargs: The same as for Python's built-in print function. Example:: def forward(self, x): self.print(x, 'in forward') """ if self.trainer.is_global_zero: progress_bar = self.trainer.progress_bar_callback if progress_bar is not None and progress_bar.is_enabled: progress_bar.print(*args, **kwargs) else: print(*args, **kwargs) def log( self, name: str, value: Any, prog_bar: bool = False, logger: bool = True, on_step: Optional[bool] = None, on_epoch: Optional[bool] = None, reduce_fx: Callable = torch.mean, tbptt_reduce_fx: Callable = torch.mean, tbptt_pad_token: int = 0, enable_graph: bool = False, sync_dist: bool = False, sync_dist_op: Union[Any, str] = 'mean', sync_dist_group: Optional[Any] = None, add_dataloader_idx: bool = True, ): """ Log a key, value Example:: self.log('train_loss', loss) The default behavior per hook is as follows .. csv-table:: ``*`` also applies to the test loop :header: "LightningModule Hook", "on_step", "on_epoch", "prog_bar", "logger" :widths: 20, 10, 10, 10, 10 "training_step", "T", "F", "F", "T" "training_step_end", "T", "F", "F", "T" "training_epoch_end", "F", "T", "F", "T" "validation_step*", "F", "T", "F", "T" "validation_step_end*", "F", "T", "F", "T" "validation_epoch_end*", "F", "T", "F", "T" Args: name: key name value: value name prog_bar: if True logs to the progress bar logger: if True logs to the logger on_step: if True logs at this step. None auto-logs at the training_step but not validation/test_step on_epoch: if True logs epoch accumulated metrics. None auto-logs at the val/test step but not training_step reduce_fx: reduction function over step values for end of epoch. Torch.mean by default tbptt_reduce_fx: function to reduce on truncated back prop tbptt_pad_token: token to use for padding enable_graph: if True, will not auto detach the graph sync_dist: if True, reduces the metric across GPUs/TPUs sync_dist_op: the op to sync across GPUs/TPUs sync_dist_group: the ddp group to sync across add_dataloader_idx: if True, appends the index of the current dataloader to the name (when using multiple). 
If False, user needs to give unique names for each dataloader to not mix values """ if self._results is not None: # in any epoch end can't log step metrics (only epoch metric) if 'epoch_end' in self._current_fx_name and on_step: m = f'on_step=True cannot be used on {self._current_fx_name} method' raise MisconfigurationException(m) if 'epoch_end' in self._current_fx_name and on_epoch is False: m = f'on_epoch cannot be False when called from the {self._current_fx_name} method' raise MisconfigurationException(m) # add log_dict # TODO: if logged twice fail with crash # set the default depending on the fx_name on_step = self.__auto_choose_log_on_step(on_step) on_epoch = self.__auto_choose_log_on_epoch(on_epoch) if self._current_hook_fx_name is not None: self.trainer.logger_connector.check_logging_in_callbacks( self._current_hook_fx_name, on_step=on_step, on_epoch=on_epoch ) # make sure user doesn't introduce logic for multi-dataloaders if "/dataloader_idx_" in name: raise MisconfigurationException( f"Logged key: {name} should not contain information about dataloader_idx." ) training_type_plugin = self.trainer.training_type_plugin # Determine if dataloader index should be added dataloader_idx = self._current_dataloader_idx if add_dataloader_idx else None self._results.log( name, value, prog_bar, logger, on_step, on_epoch, reduce_fx, tbptt_reduce_fx, tbptt_pad_token, enable_graph, sync_dist, sync_dist_op, sync_dist_group, training_type_plugin.reduce, dataloader_idx, self.device, ) def log_dict( self, dictionary: dict, prog_bar: bool = False, logger: bool = True, on_step: Optional[bool] = None, on_epoch: Optional[bool] = None, reduce_fx: Callable = torch.mean, tbptt_reduce_fx: Callable = torch.mean, tbptt_pad_token: int = 0, enable_graph: bool = False, sync_dist: bool = False, sync_dist_op: Union[Any, str] = 'mean', sync_dist_group: Optional[Any] = None, add_dataloader_idx: bool = True, ): """ Log a dictonary of values at once Example:: values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n} self.log_dict(values) Args: dictionary: key value pairs (str, tensors) prog_bar: if True logs to the progress base logger: if True logs to the logger on_step: if True logs at this step. None auto-logs for training_step but not validation/test_step on_epoch: if True logs epoch accumulated metrics. None auto-logs for val/test step but not training_step reduce_fx: reduction function over step values for end of epoch. Torch.mean by default tbptt_reduce_fx: function to reduce on truncated back prop tbptt_pad_token: token to use for padding enable_graph: if True, will not auto detach the graph sync_dist: if True, reduces the metric across GPUs/TPUs sync_dist_op: the op to sync across GPUs/TPUs sync_dist_group: the ddp group sync across add_dataloader_idx: if True, appends the index of the current dataloader to the name (when using multiple). 
If False, user needs to give unique names for each dataloader to not mix values """ for k, v in dictionary.items(): self.log( name=k, value=v, prog_bar=prog_bar, logger=logger, on_step=on_step, on_epoch=on_epoch, reduce_fx=reduce_fx, enable_graph=enable_graph, sync_dist=sync_dist, sync_dist_group=sync_dist_group, sync_dist_op=sync_dist_op, tbptt_pad_token=tbptt_pad_token, tbptt_reduce_fx=tbptt_reduce_fx, add_dataloader_idx=add_dataloader_idx ) def write_prediction( self, name: str, value: Union[torch.Tensor, List[torch.Tensor]], filename: str = 'predictions.pt' ): """ Write predictions to disk using ``torch.save`` Example:: self.write_prediction('pred', torch.tensor(...), filename='my_predictions.pt') Args: name: a string indicating the name to save the predictions under value: the predictions, either a single :class:`~torch.Tensor` or a list of them filename: name of the file to save the predictions to Note: when running in distributed mode, calling ``write_prediction`` will create a file for each device with respective names: ``filename_rank_0.pt``, ``filename_rank_1.pt``, ... .. deprecated::v1.3 Will be removed in v1.5.0. """ rank_zero_deprecation( 'LightningModule method `write_prediction` was deprecated in v1.3' ' and will be removed in v1.5.' ) self.trainer.evaluation_loop.predictions._add_prediction(name, value, filename) def write_prediction_dict(self, predictions_dict: Dict[str, Any], filename: str = 'predictions.pt'): """ Write a dictonary of predictions to disk at once using ``torch.save`` Example:: pred_dict = {'pred1': torch.tensor(...), 'pred2': torch.tensor(...)} self.write_prediction_dict(pred_dict) Args: predictions_dict: dict containing predictions, where each prediction should either be single :class:`~torch.Tensor` or a list of them Note: when running in distributed mode, calling ``write_prediction_dict`` will create a file for each device with respective names: ``filename_rank_0.pt``, ``filename_rank_1.pt``, ... .. deprecated::v1.3 Will be removed in v1.5.0. """ rank_zero_deprecation( 'LightningModule method `write_prediction_dict` was deprecated in v1.3 and' ' will be removed in v1.5.' ) for k, v in predictions_dict.items(): self.write_prediction(k, v, filename) def __auto_choose_log_on_step(self, on_step): if on_step is None: if self._current_fx_name in {'training_step', 'training_step_end'}: on_step = True elif self._current_fx_name in { 'evaluation_step', 'evaluation_step_end', 'evaluation_epoch_end', 'training_epoch_end' }: on_step = False else: on_step = False return on_step def __auto_choose_log_on_epoch(self, on_epoch): if on_epoch is None: if self._current_fx_name in {'training_step', 'training_step_end'}: on_epoch = False elif self._current_fx_name in { 'evaluation_step', 'evaluation_step_end', 'evaluation_epoch_end', 'training_epoch_end' }: on_epoch = True else: on_epoch = True return on_epoch def all_gather( self, data: Union[torch.Tensor, Dict, List, Tuple], group: Optional[Any] = None, sync_grads: bool = False, ): r""" Allows users to call ``self.all_gather()`` from the LightningModule, thus making the ```all_gather``` operation accelerator agnostic. ```all_gather``` is a function provided by accelerators to gather a tensor from several distributed processes Args: tensor: int, float, tensor of shape (batch, ...), or a (possibly nested) collection thereof. group: the process group to gather results from. 
Defaults to all processes (world) sync_grads: flag that allows users to synchronize gradients for all_gather op Return: A tensor of shape (world_size, batch, ...), or if the input was a collection the output will also be a collection with tensors of this shape. """ group = group if group is not None else torch.distributed.group.WORLD all_gather = self.trainer.accelerator.all_gather data = convert_to_tensors(data, device=self.device) all_gather = partial(all_gather, group=group, sync_grads=sync_grads) return apply_to_collection(data, torch.Tensor, all_gather) def forward(self, *args, **kwargs) -> Any: r""" Same as :meth:`torch.nn.Module.forward()`. Args: *args: Whatever you decide to pass into the forward method. **kwargs: Keyword arguments are also possible. Return: Your model's output """ return super().forward(*args, **kwargs) def training_step(self, *args, **kwargs) -> STEP_OUTPUT: r""" Here you compute and return the training loss and some additional metrics for e.g. the progress bar or logger. Args: batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]): The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list. batch_idx (int): Integer displaying index of this batch optimizer_idx (int): When using multiple optimizers, this argument will also be present. hiddens(:class:`~torch.Tensor`): Passed in if :paramref:`~pytorch_lightning.core.lightning.LightningModule.truncated_bptt_steps` > 0. Return: Any of. - :class:`~torch.Tensor` - The loss tensor - ``dict`` - A dictionary. Can include any keys, but must include the key ``'loss'`` - ``None`` - Training will skip to the next batch Note: Returning ``None`` is currently not supported for multi-GPU or TPU, or with 16-bit precision enabled. In this step you'd normally do the forward pass and calculate the loss for a batch. You can also do fancier things like multiple forward passes or something model specific. Example:: def training_step(self, batch, batch_idx): x, y, z = batch out = self.encoder(x) loss = self.loss(out, x) return loss If you define multiple optimizers, this step will be called with an additional ``optimizer_idx`` parameter. .. code-block:: python # Multiple optimizers (e.g.: GANs) def training_step(self, batch, batch_idx, optimizer_idx): if optimizer_idx == 0: # do training_step with encoder if optimizer_idx == 1: # do training_step with decoder If you add truncated back propagation through time you will also get an additional argument with the hidden states of the previous step. .. code-block:: python # Truncated back-propagation through time def training_step(self, batch, batch_idx, hiddens): # hiddens are the hidden states from the previous truncated backprop step ... out, hiddens = self.lstm(data, hiddens) ... return {'loss': loss, 'hiddens': hiddens} Note: The loss value shown in the progress bar is smoothed (averaged) over the last values, so it differs from the actual loss returned in train/validation step. """ rank_zero_warn("`training_step` must be implemented to be used with the Lightning Trainer") def training_step_end(self, *args, **kwargs) -> STEP_OUTPUT: """ Use this when training with dp or ddp2 because :meth:`training_step` will operate on only part of the batch. However, this is still optional and only needed for things like softmax or NCE loss. Note: If you later switch to ddp or some other mode, this will still be called so that you don't have to change your code .. 
code-block:: python

            # pseudocode
            sub_batches = split_batches_for_dp(batch)
            batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches]
            training_step_end(batch_parts_outputs)

        Args:
            batch_parts_outputs: What you return in `training_step` for each batch part.

        Return:
            Anything

        When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:

        .. code-block:: python

            def training_step(self, batch, batch_idx):
                # batch is 1/num_gpus big
                x, y = batch

                out = self(x)

                # softmax uses only a portion of the batch in the denominator
                loss = self.softmax(out)
                loss = nce_loss(loss)
                return loss

        If you wish to do something with all the parts of the batch, then use this method to do it:

        .. code-block:: python

            def training_step(self, batch, batch_idx):
                # batch is 1/num_gpus big
                x, y = batch

                out = self.encoder(x)
                return {'pred': out}

            def training_step_end(self, training_step_outputs):
                gpu_0_pred = training_step_outputs[0]['pred']
                gpu_1_pred = training_step_outputs[1]['pred']
                gpu_n_pred = training_step_outputs[n]['pred']

                # this softmax now uses the full batch
                loss = nce_loss([gpu_0_pred, gpu_1_pred, gpu_n_pred])
                return loss

        See Also:
            See the :ref:`advanced/multi_gpu:Multi-GPU training` guide for more details.
        """

    def training_epoch_end(self, outputs: EPOCH_OUTPUT) -> None:
        """
        Called at the end of the training epoch with the outputs of all training steps.
        Use this in case you need to do something with all the outputs for every training_step.

        .. code-block:: python

            # the pseudocode for these calls
            train_outs = []
            for train_batch in train_data:
                out = training_step(train_batch)
                train_outs.append(out)
            training_epoch_end(train_outs)

        Args:
            outputs: List of outputs you defined in :meth:`training_step`, or if there are
                multiple dataloaders, a list containing a list of outputs for each dataloader.

        Return:
            None

        Note:
            If this method is not overridden, this won't be called.

        Example::

            def training_epoch_end(self, training_step_outputs):
                # do something with all training_step outputs
                ...

        With multiple dataloaders, ``outputs`` will be a list of lists. The outer list
        contains one entry per dataloader, while the inner list contains the individual
        outputs of each training step for that dataloader.

        .. code-block:: python

            def training_epoch_end(self, training_step_outputs):
                for out in training_step_outputs:
                    # do something here
                    ...
        """

    def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
        r"""
        Operates on a single batch of data from the validation set.
        In this step you might generate examples or calculate anything of interest like accuracy.

        .. code-block:: python

            # the pseudocode for these calls
            val_outs = []
            for val_batch in val_data:
                out = validation_step(val_batch)
                val_outs.append(out)
            validation_epoch_end(val_outs)

        Args:
            batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
                The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
            batch_idx (int): The index of this batch
            dataloader_idx (int): The index of the dataloader that produced this batch
                (only if multiple val dataloaders used)

        Return:
            Any of.

            - Any object or value
            - ``None`` - Validation will skip to the next batch

        .. code-block:: python

            # pseudocode of order
            val_outs = []
            for val_batch in val_data:
                out = validation_step(val_batch)
                if defined('validation_step_end'):
                    out = validation_step_end(out)
                val_outs.append(out)
            val_outs = validation_epoch_end(val_outs)

        ..
code-block:: python # if you have one val dataloader: def validation_step(self, batch, batch_idx) # if you have multiple val dataloaders: def validation_step(self, batch, batch_idx, dataloader_idx) Examples:: # CASE 1: A single validation dataset def validation_step(self, batch, batch_idx): x, y = batch # implement your own out = self(x) loss = self.loss(out, y) # log 6 example images # or generated text... or whatever sample_imgs = x[:6] grid = torchvision.utils.make_grid(sample_imgs) self.logger.experiment.add_image('example_images', grid, 0) # calculate acc labels_hat = torch.argmax(out, dim=1) val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0) # log the outputs! self.log_dict({'val_loss': loss, 'val_acc': val_acc}) If you pass in multiple val dataloaders, :meth:`validation_step` will have an additional argument. .. code-block:: python # CASE 2: multiple validation dataloaders def validation_step(self, batch, batch_idx, dataloader_idx): # dataloader_idx tells you which dataset this is. Note: If you don't need to validate you don't need to implement this method. Note: When the :meth:`validation_step` is called, the model has been put in eval mode and PyTorch gradients have been disabled. At the end of validation, the model goes back to training mode and gradients are enabled. """ def validation_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]: """ Use this when validating with dp or ddp2 because :meth:`validation_step` will operate on only part of the batch. However, this is still optional and only needed for things like softmax or NCE loss. Note: If you later switch to ddp or some other mode, this will still be called so that you don't have to change your code. .. code-block:: python # pseudocode sub_batches = split_batches_for_dp(batch) batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches] validation_step_end(batch_parts_outputs) Args: batch_parts_outputs: What you return in :meth:`validation_step` for each batch part. Return: None or anything .. code-block:: python # WITHOUT validation_step_end # if used in DP or DDP2, this batch is 1/num_gpus large def validation_step(self, batch, batch_idx): # batch is 1/num_gpus big x, y = batch out = self.encoder(x) loss = self.softmax(out) loss = nce_loss(loss) self.log('val_loss', loss) # -------------- # with validation_step_end to do softmax over the full batch def validation_step(self, batch, batch_idx): # batch is 1/num_gpus big x, y = batch out = self(x) return out def validation_step_end(self, val_step_outputs): for out in val_step_outputs: # do something with these See Also: See the :ref:`advanced/multi_gpu:Multi-GPU training` guide for more details. """ def validation_epoch_end(self, outputs: EPOCH_OUTPUT) -> None: """ Called at the end of the validation epoch with the outputs of all validation steps. .. code-block:: python # the pseudocode for these calls val_outs = [] for val_batch in val_data: out = validation_step(val_batch) val_outs.append(out) validation_epoch_end(val_outs) Args: outputs: List of outputs you defined in :meth:`validation_step`, or if there are multiple dataloaders, a list containing a list of outputs for each dataloader. Return: None Note: If you didn't define a :meth:`validation_step`, this won't be called. Examples: With a single dataloader: .. code-block:: python def validation_epoch_end(self, val_step_outputs): for out in val_step_outputs: # do something With multiple dataloaders, `outputs` will be a list of lists. 
The outer list
            contains one entry per dataloader, while the inner list contains the
            individual outputs of each validation step for that dataloader.

            .. code-block:: python

                def validation_epoch_end(self, outputs):
                    final_value = 0
                    for dataloader_outputs in outputs:
                        for val_step_out in dataloader_outputs:
                            # do something
                            final_value += val_step_out

                    self.log('final_metric', final_value)
        """

    def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
        r"""
        Operates on a single batch of data from the test set.
        In this step you'd normally generate examples or calculate anything of interest
        such as accuracy.

        .. code-block:: python

            # the pseudocode for these calls
            test_outs = []
            for test_batch in test_data:
                out = test_step(test_batch)
                test_outs.append(out)
            test_epoch_end(test_outs)

        Args:
            batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
                The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
            batch_idx (int): The index of this batch.
            dataloader_idx (int): The index of the dataloader that produced this batch
                (only if multiple test dataloaders used).

        Return:
            Any of.

            - Any object or value
            - ``None`` - Testing will skip to the next batch

        .. code-block:: python

            # if you have one test dataloader:
            def test_step(self, batch, batch_idx)

            # if you have multiple test dataloaders:
            def test_step(self, batch, batch_idx, dataloader_idx)

        Examples::

            # CASE 1: A single test dataset
            def test_step(self, batch, batch_idx):
                x, y = batch

                # implement your own
                out = self(x)
                loss = self.loss(out, y)

                # log 6 example images
                # or generated text... or whatever
                sample_imgs = x[:6]
                grid = torchvision.utils.make_grid(sample_imgs)
                self.logger.experiment.add_image('example_images', grid, 0)

                # calculate acc
                labels_hat = torch.argmax(out, dim=1)
                test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)

                # log the outputs!
                self.log_dict({'test_loss': loss, 'test_acc': test_acc})

        If you pass in multiple test dataloaders, :meth:`test_step` will have an additional argument.

        .. code-block:: python

            # CASE 2: multiple test dataloaders
            def test_step(self, batch, batch_idx, dataloader_idx):
                # dataloader_idx tells you which dataset this is.

        Note:
            If you don't need to test you don't need to implement this method.

        Note:
            When the :meth:`test_step` is called, the model has been put in eval mode and
            PyTorch gradients have been disabled. At the end of the test epoch, the model goes back
            to training mode and gradients are enabled.
        """

    def test_step_end(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
        """
        Use this when testing with dp or ddp2 because :meth:`test_step` will operate
        on only part of the batch. However, this is still optional
        and only needed for things like softmax or NCE loss.

        Note:
            If you later switch to ddp or some other mode, this will still be called
            so that you don't have to change your code.

        .. code-block:: python

            # pseudocode
            sub_batches = split_batches_for_dp(batch)
            batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches]
            test_step_end(batch_parts_outputs)

        Args:
            batch_parts_outputs: What you return in :meth:`test_step` for each batch part.

        Return:
            None or anything

        .. code-block:: python

            # WITHOUT test_step_end
            # if used in DP or DDP2, this batch is 1/num_gpus large
            def test_step(self, batch, batch_idx):
                # batch is 1/num_gpus big
                x, y = batch

                out = self(x)
                loss = self.softmax(out)
                self.log('test_loss', loss)

            # --------------
            # with test_step_end to do softmax over the full batch
            def test_step(self, batch, batch_idx):
                # batch is 1/num_gpus big
                x, y = batch

                out = self.encoder(x)
                return out

            def test_step_end(self, output_results):
                # this out is now the full size of the batch
                all_test_step_outs = output_results.out
                loss = nce_loss(all_test_step_outs)
                self.log('test_loss', loss)

        See Also:
            See the :ref:`advanced/multi_gpu:Multi-GPU training` guide for more details.
        """

    def test_epoch_end(self, outputs: EPOCH_OUTPUT) -> None:
        """
        Called at the end of a test epoch with the output of all test steps.

        .. code-block:: python

            # the pseudocode for these calls
            test_outs = []
            for test_batch in test_data:
                out = test_step(test_batch)
                test_outs.append(out)
            test_epoch_end(test_outs)

        Args:
            outputs: List of outputs you defined in :meth:`test_step_end`, or if there
                are multiple dataloaders, a list containing a list of outputs for each dataloader

        Return:
            None

        Note:
            If you didn't define a :meth:`test_step`, this won't be called.

        Examples:
            With a single dataloader:

            .. code-block:: python

                def test_epoch_end(self, outputs):
                    # do something with the outputs of all test batches
                    all_test_preds = [out['preds'] for out in outputs]

                    some_result = calc_all_results(all_test_preds)
                    self.log('final_metric', some_result)

            With multiple dataloaders, `outputs` will be a list of lists. The outer list
            contains one entry per dataloader, while the inner list contains the individual
            outputs of each test step for that dataloader.

            .. code-block:: python

                def test_epoch_end(self, outputs):
                    final_value = 0
                    for dataloader_outputs in outputs:
                        for test_step_out in dataloader_outputs:
                            # do something
                            final_value += test_step_out

                    self.log('final_metric', final_value)
        """

    def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
        """
        Step function called during :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`.
        By default, it calls :meth:`~pytorch_lightning.core.lightning.LightningModule.forward`.
        Override to add any processing logic.

        Args:
            batch: Current batch
            batch_idx: Index of current batch
            dataloader_idx: Index of the current dataloader

        Return:
            Predicted output
        """
        return self(batch)

    def configure_callbacks(self):
        """
        Configure model-specific callbacks.
        When the model gets attached, e.g., when ``.fit()`` or ``.test()`` gets called,
        the list returned here will be merged with the list of callbacks passed to the Trainer's
        ``callbacks`` argument.
        If a callback returned here has the same type as one or several callbacks already present in
        the Trainer's callbacks list, it will take priority and replace them.
        In addition, Lightning will make sure :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint`
        callbacks run last.

        Return:
            A list of callbacks which will extend the list of callbacks in the Trainer.

        Example::

            def configure_callbacks(self):
                early_stop = EarlyStopping(monitor="val_acc", mode="max")
                checkpoint = ModelCheckpoint(monitor="val_loss")
                return [early_stop, checkpoint]

        Note:
            Certain callback methods like :meth:`~pytorch_lightning.callbacks.base.Callback.on_init_start`
            will never be invoked on the new callbacks returned here.
        """
        return []

    def configure_optimizers(self):
        r"""
        Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple. Return: Any of these 6 options. - **Single optimizer**. - **List or Tuple** of optimizers. - **Two lists** - The first list has multiple optimizers, and the second has multiple LR schedulers (or multiple lr_dict). - **Dictionary**, with an ``"optimizer"`` key, and (optionally) a ``"lr_scheduler"`` key whose value is a single LR scheduler or lr_dict. - **Tuple of dictionaries** as described above, with an optional ``"frequency"`` key. - **None** - Fit will run without any optimizer. Note: The ``frequency`` value specified in a dict along with the ``optimizer`` key is an int corresponding to the number of sequential batches optimized with the specific optimizer. It should be given to none or to all of the optimizers. There is a difference between passing multiple optimizers in a list, and passing multiple optimizers in dictionaries with a frequency of 1: In the former case, all optimizers will operate on the given batch in each optimization step. In the latter, only one optimizer will operate on the given batch at every step. This is different from the ``frequency`` value specified in the lr_dict mentioned below. .. code-block:: python def configure_optimizers(self): optimizer_one = torch.optim.SGD(self.model.parameters(), lr=0.01) optimizer_two = torch.optim.SGD(self.model.parameters(), lr=0.01) return [ {'optimizer': optimizer_one, 'frequency': 5}, {'optimizer': optimizer_two, 'frequency': 10}, ] In this example, the first optimizer will be used for the first 5 steps, the second optimizer for the next 10 steps and that cycle will continue. If an LR scheduler is specified for an optimizer using the ``lr_scheduler`` key in the above dict, the scheduler will only be updated when its optimizer is being used. Note: The lr_dict is a dictionary which contains the scheduler and its associated configuration. The default configuration is shown below. .. code-block:: python lr_dict = { 'scheduler': lr_scheduler, # The LR scheduler instance (required) 'interval': 'epoch', # The unit of the scheduler's step size 'frequency': 1, # The frequency of the scheduler 'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler 'monitor': 'val_loss', # Metric for ReduceLROnPlateau to monitor 'strict': True, # Whether to crash the training if `monitor` is not found 'name': None, # Custom name for LearningRateMonitor to use } Only the ``"scheduler"`` key is required, the rest will be set to the defaults above. Note: The ``"frequency"`` value is an ``int`` corresponding to the number of sequential batches optimized with the specific optimizer. It should be given to none or to all of the optimizers. There is a difference between passing multiple optimizers in a list and passing multiple optimizers in dictionaries with a frequency of 1: In the former case, all optimizers will operate on the given batch in each optimization step. In the latter, only one optimizer will operate on the given batch at every step. 
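        As an illustration, a ``ReduceLROnPlateau`` scheduler, which needs a metric to monitor,
        can be configured through such an lr_dict (a sketch; assumes ``'val_loss'`` is logged
        in your validation step):

        .. code-block:: python

            def configure_optimizers(self):
                optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
                scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3)
                return {
                    'optimizer': optimizer,
                    'lr_scheduler': {'scheduler': scheduler, 'monitor': 'val_loss'},
                }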
Examples:: # most cases def configure_optimizers(self): return Adam(self.parameters(), lr=1e-3) # multiple optimizer case (e.g.: GAN) def configure_optimizers(self): gen_opt = Adam(self.model_gen.parameters(), lr=0.01) dis_opt = Adam(self.model_dis.parameters(), lr=0.02) return gen_opt, dis_opt # example with learning rate schedulers def configure_optimizers(self): gen_opt = Adam(self.model_gen.parameters(), lr=0.01) dis_opt = Adam(self.model_dis.parameters(), lr=0.02) dis_sch = CosineAnnealing(dis_opt, T_max=10) return [gen_opt, dis_opt], [dis_sch] # example with step-based learning rate schedulers def configure_optimizers(self): gen_opt = Adam(self.model_gen.parameters(), lr=0.01) dis_opt = Adam(self.model_dis.parameters(), lr=0.02) gen_sch = {'scheduler': ExponentialLR(gen_opt, 0.99), 'interval': 'step'} # called after each training step dis_sch = CosineAnnealing(dis_opt, T_max=10) # called every epoch return [gen_opt, dis_opt], [gen_sch, dis_sch] # example with optimizer frequencies # see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1 # https://arxiv.org/abs/1704.00028 def configure_optimizers(self): gen_opt = Adam(self.model_gen.parameters(), lr=0.01) dis_opt = Adam(self.model_dis.parameters(), lr=0.02) n_critic = 5 return ( {'optimizer': dis_opt, 'frequency': n_critic}, {'optimizer': gen_opt, 'frequency': 1} ) Note: Some things to know: - Lightning calls ``.backward()`` and ``.step()`` on each optimizer and learning rate scheduler as needed. - If you use 16-bit precision (``precision=16``), Lightning will automatically handle the optimizers. - If you use multiple optimizers, :meth:`training_step` will have an additional ``optimizer_idx`` parameter. - If you use :class:`torch.optim.LBFGS`, Lightning handles the closure function automatically for you. - If you use multiple optimizers, gradients will be calculated only for the parameters of current optimizer at each training step. - If you need to control how often those optimizers step or override the default ``.step()`` schedule, override the :meth:`optimizer_step` hook. - If you only want to call a learning rate scheduler every ``x`` step or epoch, or want to monitor a custom metric, you can specify these in a lr_dict: .. code-block:: python lr_dict = { 'scheduler': lr_scheduler, 'interval': 'step', # or 'epoch' 'monitor': 'val_f1', 'frequency': x, } """ rank_zero_warn("`configure_optimizers` must be implemented to be used with the Lightning Trainer") def manual_backward(self, loss: Tensor, optimizer: Optional[Optimizer] = None, *args, **kwargs) -> None: """ Call this directly from your training_step when doing optimizations manually. By using this we can ensure that all the proper scaling when using 16-bit etc has been done for you. This function forwards all args to the .backward() call as well. See :ref:`manual optimization<common/optimizers:Manual optimization>` for more examples. Example:: def training_step(...): opt = self.optimizers() loss = ... opt.zero_grad() # automatically applies scaling, etc... 
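                # (with precision=16, manual_backward also applies the loss scaling for you)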
self.manual_backward(loss) opt.step() """ if optimizer is not None: rank_zero_deprecation( "`optimizer` argument to `manual_backward` is deprecated in v1.2 and will be removed in v1.4" ) # make sure we're using manual opt self._verify_is_manual_optimization('manual_backward') # backward self._running_manual_backward = True self.trainer.train_loop.backward(loss, optimizer=None, opt_idx=None, *args, **kwargs) self._running_manual_backward = False def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None: """ Override backward with your own implementation if you need to. Args: loss: Loss is already scaled by accumulated grads optimizer: Current optimizer being used optimizer_idx: Index of the current optimizer being used Called to perform backward step. Feel free to override as needed. The loss passed in has already been scaled for accumulated gradients if requested. Example:: def backward(self, loss, optimizer, optimizer_idx): loss.backward() """ if self.automatic_optimization or self._running_manual_backward: loss.backward(*args, **kwargs) def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int): """ Makes sure only the gradients of the current optimizer's parameters are calculated in the training step to prevent dangling gradients in multiple-optimizer setup. .. note:: Only called when using multiple optimizers Override for your own behavior It works with ``untoggle_optimizer`` to make sure param_requires_grad_state is properly reset. Args: optimizer: Current optimizer used in training_loop optimizer_idx: Current optimizer idx in training_loop """ # Iterate over all optimizer parameters to preserve their `requires_grad` information # in case these are pre-defined during `configure_optimizers` param_requires_grad_state = {} for opt in self.optimizers(use_pl_optimizer=False): for group in opt.param_groups: for param in group['params']: # If a param already appear in param_requires_grad_state, continue if param in param_requires_grad_state: continue param_requires_grad_state[param] = param.requires_grad param.requires_grad = False # Then iterate over the current optimizer's parameters and set its `requires_grad` # properties accordingly for group in optimizer.param_groups: for param in group['params']: param.requires_grad = param_requires_grad_state[param] self._param_requires_grad_state = param_requires_grad_state def untoggle_optimizer(self, optimizer_idx: int): """ .. note:: Only called when using multiple optimizers Override for your own behavior Args: optimizer_idx: Current optimizer idx in training_loop """ for opt_idx, opt in enumerate(self.optimizers(use_pl_optimizer=False)): if optimizer_idx != opt_idx: for group in opt.param_groups: for param in group['params']: if param in self._param_requires_grad_state: param.requires_grad = self._param_requires_grad_state[param] # save memory self._param_requires_grad_state = dict() def optimizer_step( self, epoch: int = None, batch_idx: int = None, optimizer: Optimizer = None, optimizer_idx: int = None, optimizer_closure: Optional[Callable] = None, on_tpu: bool = None, using_native_amp: bool = None, using_lbfgs: bool = None, ) -> None: r""" Override this method to adjust the default way the :class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer. By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example once per optimizer. 
Warning: If you are overriding this method, make sure that you pass the ``optimizer_closure`` parameter to ``optimizer.step()`` function as shown in the examples. This ensures that ``training_step()``, ``optimizer.zero_grad()``, ``backward()`` are called within :meth:`~pytorch_lightning.trainer.training_loop.TrainLoop.run_training_batch`. Args: epoch: Current epoch batch_idx: Index of current batch optimizer: A PyTorch optimizer optimizer_idx: If you used multiple optimizers, this indexes into that list. optimizer_closure: Closure for all optimizers on_tpu: ``True`` if TPU backward is required using_native_amp: ``True`` if using native amp using_lbfgs: True if the matching optimizer is :class:`torch.optim.LBFGS` Examples:: # DEFAULT def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs): optimizer.step(closure=optimizer_closure) # Alternating schedule for optimizer steps (i.e.: GANs) def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs): # update generator opt every step if optimizer_idx == 0: optimizer.step(closure=optimizer_closure) # update discriminator opt every 2 steps if optimizer_idx == 1: if (batch_idx + 1) % 2 == 0 : optimizer.step(closure=optimizer_closure) # ... # add as many optimizers as you want Here's another example showing how to use this for more advanced things such as learning rate warm-up: .. code-block:: python # learning rate warm-up def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs): # warm up lr if self.trainer.global_step < 500: lr_scale = min(1., float(self.trainer.global_step + 1) / 500.) for pg in optimizer.param_groups: pg['lr'] = lr_scale * self.learning_rate # update params optimizer.step(closure=optimizer_closure) """ optimizer.step(closure=optimizer_closure) def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int): """Override this method to change the default behaviour of ``optimizer.zero_grad()``. Args: epoch: Current epoch batch_idx: Index of current batch optimizer: A PyTorch optimizer optimizer_idx: If you used multiple optimizers this indexes into that list. Examples:: # DEFAULT def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx): optimizer.zero_grad() # Set gradients to `None` instead of zero to improve performance. def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx): optimizer.zero_grad(set_to_none=True) See :meth:`torch.optim.Optimizer.zero_grad` for the explanation of the above example. """ optimizer.zero_grad() def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list: r""" When using truncated backpropagation through time, each batch must be split along the time dimension. Lightning handles this by default, but for custom behavior override this function. Args: batch: Current batch split_size: The size of the split Return: List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated back propagation through time. The default implementation splits root level Tensors and Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length. 
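        For example, with ``split_size=4`` a batch whose tensors have time dimension 10 is split
        into three chunks of time-lengths 4, 4 and 2.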
Examples:: def tbptt_split_batch(self, batch, split_size): splits = [] for t in range(0, time_dims[0], split_size): batch_split = [] for i, x in enumerate(batch): if isinstance(x, torch.Tensor): split_x = x[:, t:t + split_size] elif isinstance(x, collections.Sequence): split_x = [None] * len(x) for batch_idx in range(len(x)): split_x[batch_idx] = x[batch_idx][t:t + split_size] batch_split.append(split_x) splits.append(batch_split) return splits Note: Called in the training loop after :meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start` if :paramref:`~pytorch_lightning.core.lightning.LightningModule.truncated_bptt_steps` > 0. Each returned batch split is passed separately to :meth:`training_step`. """ time_dims = [len(x[0]) for x in batch if isinstance(x, (torch.Tensor, collections.Sequence))] assert len(time_dims) >= 1, "Unable to determine batch time dimension" assert all(x == time_dims[0] for x in time_dims), "Batch time dimension length is ambiguous" splits = [] for t in range(0, time_dims[0], split_size): batch_split = [] for i, x in enumerate(batch): if isinstance(x, torch.Tensor): split_x = x[:, t:t + split_size] elif isinstance(x, collections.Sequence): split_x = [None] * len(x) for batch_idx in range(len(x)): split_x[batch_idx] = x[batch_idx][t:t + split_size] batch_split.append(split_x) splits.append(batch_split) return splits def summarize(self, mode: Optional[str] = ModelSummary.MODE_DEFAULT) -> Optional[ModelSummary]: model_summary = None if mode in ModelSummary.MODES: model_summary = ModelSummary(self, mode=mode) log.info("\n" + str(model_summary)) elif mode is not None: raise MisconfigurationException(f"`mode` can be None, {', '.join(ModelSummary.MODES)}, got {mode}") return model_summary def freeze(self) -> None: r""" Freeze all params for inference. Example:: model = MyLightningModule(...) model.freeze() """ for param in self.parameters(): param.requires_grad = False self.eval() def unfreeze(self) -> None: """ Unfreeze all parameters for training. .. code-block:: python model = MyLightningModule(...) model.unfreeze() """ for param in self.parameters(): param.requires_grad = True self.train() def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]: r""" Implement this to override the default items displayed in the progress bar. By default it includes the average loss value, split index of BPTT (if used) and the version of the experiment when using a logger. .. code-block:: Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10] Here is an example how to override the defaults: .. code-block:: python def get_progress_bar_dict(self): # don't show the version number items = super().get_progress_bar_dict() items.pop("v_num", None) return items Return: Dictionary with the items to be displayed in the progress bar. 
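        You can also add entries of your own. For example, to additionally display the current
        learning rate (a sketch; assumes a single optimizer was configured):

        .. code-block:: python

            def get_progress_bar_dict(self):
                items = super().get_progress_bar_dict()
                # ``trainer.optimizers`` holds the optimizers built from ``configure_optimizers``
                items["lr"] = f"{self.trainer.optimizers[0].param_groups[0]['lr']:.3g}"
                return items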
""" # call .item() only once but store elements without graphs running_train_loss = self.trainer.train_loop.running_loss.mean() avg_training_loss = None if running_train_loss is not None: avg_training_loss = running_train_loss.cpu().item() elif self.automatic_optimization: avg_training_loss = float('NaN') tqdm_dict = {} if avg_training_loss is not None: tqdm_dict["loss"] = f"{avg_training_loss:.3g}" module_tbptt_enabled = self.truncated_bptt_steps > 0 trainer_tbptt_enabled = self.trainer.truncated_bptt_steps is not None and self.trainer.truncated_bptt_steps > 0 if module_tbptt_enabled or trainer_tbptt_enabled: tqdm_dict["split_idx"] = self.trainer.split_idx if self.trainer.logger is not None and self.trainer.logger.version is not None: version = self.trainer.logger.version # show last 4 places of long version strings version = version[-4:] if isinstance(version, str) else version tqdm_dict["v_num"] = version return tqdm_dict def _verify_is_manual_optimization(self, fn_name): if self.automatic_optimization: raise MisconfigurationException( f'to use {fn_name}, please disable automatic optimization:' ' set model property `automatic_optimization` as False' ) @classmethod def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]: """ Collect all module arguments in the current constructor and all child constructors. The child constructors are all the ``__init__`` methods that reach the current class through (chained) ``super().__init__()`` calls. Args: frame: instance frame Returns: self_arguments: arguments dictionary of the first instance parents_arguments: arguments dictionary of the parent's instances """ if not frame: frame = inspect.currentframe() frame_args = collect_init_args(frame.f_back, []) self_arguments = frame_args[-1] # set hyper_parameters in child self_arguments = self_arguments parents_arguments = {} # add all arguments from parents for args in frame_args[:-1]: parents_arguments.update(args) return self_arguments, parents_arguments def save_hyperparameters( self, *args, ignore: Optional[Union[Sequence[str], str]] = None, frame: Optional[types.FrameType] = None ) -> None: """Save model arguments to ``hparams`` attribute. Args: args: single object of `dict`, `NameSpace` or `OmegaConf` or string names or arguments from class ``__init__`` ignore: an argument name or a list of argument names from class ``__init__`` to be ignored frame: a frame object. Default is None Example:: >>> class ManuallyArgsModel(LightningModule): ... def __init__(self, arg1, arg2, arg3): ... super().__init__() ... # manually assign arguments ... self.save_hyperparameters('arg1', 'arg3') ... def forward(self, *args, **kwargs): ... ... >>> model = ManuallyArgsModel(1, 'abc', 3.14) >>> model.hparams "arg1": 1 "arg3": 3.14 >>> class AutomaticArgsModel(LightningModule): ... def __init__(self, arg1, arg2, arg3): ... super().__init__() ... # equivalent automatic ... self.save_hyperparameters() ... def forward(self, *args, **kwargs): ... ... >>> model = AutomaticArgsModel(1, 'abc', 3.14) >>> model.hparams "arg1": 1 "arg2": abc "arg3": 3.14 >>> class SingleArgModel(LightningModule): ... def __init__(self, params): ... super().__init__() ... # manually assign single argument ... self.save_hyperparameters(params) ... def forward(self, *args, **kwargs): ... ... >>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14)) >>> model.hparams "p1": 1 "p2": abc "p3": 3.14 >>> class ManuallyArgsModel(LightningModule): ... def __init__(self, arg1, arg2, arg3): ... super().__init__() ... 
# pass argument(s) to ignore as a string or in a list ... self.save_hyperparameters(ignore='arg2') ... def forward(self, *args, **kwargs): ... ... >>> model = ManuallyArgsModel(1, 'abc', 3.14) >>> model.hparams "arg1": 1 "arg3": 3.14 """ # the frame needs to be created in this file. if not frame: frame = inspect.currentframe().f_back save_hyperparameters(self, *args, ignore=ignore, frame=frame) def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None: if isinstance(hp, Namespace): hp = vars(hp) if isinstance(hp, dict): hp = AttributeDict(hp) elif isinstance(hp, PRIMITIVE_TYPES): raise ValueError(f"Primitives {PRIMITIVE_TYPES} are not allowed.") elif not isinstance(hp, ALLOWED_CONFIG_TYPES): raise ValueError(f"Unsupported config type of {type(hp)}.") if isinstance(hp, dict) and isinstance(self.hparams, dict): self.hparams.update(hp) else: self._hparams = hp @torch.no_grad() def to_onnx( self, file_path: Union[str, Path], input_sample: Optional[Any] = None, **kwargs, ): """ Saves the model in ONNX format Args: file_path: The path of the file the onnx model should be saved to. input_sample: An input for tracing. Default: None (Use self.example_input_array) **kwargs: Will be passed to torch.onnx.export function. Example: >>> class SimpleModel(LightningModule): ... def __init__(self): ... super().__init__() ... self.l1 = torch.nn.Linear(in_features=64, out_features=4) ... ... def forward(self, x): ... return torch.relu(self.l1(x.view(x.size(0), -1))) >>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile: ... model = SimpleModel() ... input_sample = torch.randn((1, 64)) ... model.to_onnx(tmpfile.name, input_sample, export_params=True) ... os.path.isfile(tmpfile.name) True """ mode = self.training if input_sample is None: if self.example_input_array is None: raise ValueError( "Could not export to ONNX since neither `input_sample` nor" " `model.example_input_array` attribute is set." ) input_sample = self.example_input_array input_sample = self._apply_batch_transfer_handler(input_sample) if "example_outputs" not in kwargs: self.eval() kwargs["example_outputs"] = self(input_sample) torch.onnx.export(self, input_sample, file_path, **kwargs) self.train(mode) @torch.no_grad() def to_torchscript( self, file_path: Optional[Union[str, Path]] = None, method: Optional[str] = 'script', example_inputs: Optional[Any] = None, **kwargs, ) -> Union[ScriptModule, Dict[str, ScriptModule]]: """ By default compiles the whole model to a :class:`~torch.jit.ScriptModule`. If you want to use tracing, please provided the argument `method='trace'` and make sure that either the example_inputs argument is provided, or the model has self.example_input_array set. If you would like to customize the modules that are scripted you should override this method. In case you want to return multiple modules, we recommend using a dictionary. Args: file_path: Path where to save the torchscript. Default: None (no file saved). method: Whether to use TorchScript's script or trace method. Default: 'script' example_inputs: An input to be used to do tracing when method is set to 'trace'. Default: None (Use self.example_input_array) **kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` or :func:`torch.jit.trace` function. Note: - Requires the implementation of the :meth:`~pytorch_lightning.core.lightning.LightningModule.forward` method. - The exported script will be set to evaluation mode. 
- It is recommended that you install the latest supported version of PyTorch to use this feature without limitations. See also the :mod:`torch.jit` documentation for supported features. Example: >>> class SimpleModel(LightningModule): ... def __init__(self): ... super().__init__() ... self.l1 = torch.nn.Linear(in_features=64, out_features=4) ... ... def forward(self, x): ... return torch.relu(self.l1(x.view(x.size(0), -1))) ... >>> model = SimpleModel() >>> torch.jit.save(model.to_torchscript(), "model.pt") # doctest: +SKIP >>> os.path.isfile("model.pt") # doctest: +SKIP >>> torch.jit.save(model.to_torchscript(file_path="model_trace.pt", method='trace', # doctest: +SKIP ... example_inputs=torch.randn(1, 64))) # doctest: +SKIP >>> os.path.isfile("model_trace.pt") # doctest: +SKIP True Return: This LightningModule as a torchscript, regardless of whether file_path is defined or not. """ mode = self.training if method == 'script': torchscript_module = torch.jit.script(self.eval(), **kwargs) elif method == 'trace': # if no example inputs are provided, try to see if model has example_input_array set if example_inputs is None: if self.example_input_array is None: raise ValueError( 'Choosing method=`trace` requires either `example_inputs`' ' or `model.example_input_array` to be defined.' ) example_inputs = self.example_input_array # automatically send example inputs to the right device and use trace example_inputs = self._apply_batch_transfer_handler(example_inputs) torchscript_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs) else: raise ValueError(f"The 'method' parameter only supports 'script' or 'trace', but value given was: {method}") self.train(mode) if file_path is not None: torch.jit.save(torchscript_module, file_path) return torchscript_module @property def hparams(self) -> Union[AttributeDict, dict, Namespace]: if not hasattr(self, "_hparams"): self._hparams = AttributeDict() return self._hparams @property def hparams_initial(self) -> AttributeDict: if not hasattr(self, "_hparams_initial"): return AttributeDict() # prevent any change return copy.deepcopy(self._hparams_initial) @property def model_size(self) -> float: # todo: think about better way without need to dump model to drive tmp_name = f"{uuid.uuid4().hex}.pt" torch.save(self.state_dict(), tmp_name) size_mb = os.path.getsize(tmp_name) / 1e6 os.remove(tmp_name) return size_mb
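The utilities above compose naturally in a single workflow. Here is a minimal sketch (the class name TinyClassifier and its constructor arguments are illustrative, not part of the library) showing save_hyperparameters, to_torchscript, and freeze working together:

import torch
import pytorch_lightning as pl


class TinyClassifier(pl.LightningModule):
    def __init__(self, in_features: int = 64, num_classes: int = 4, learning_rate: float = 1e-3):
        super().__init__()
        # collect the constructor arguments onto self.hparams (and into checkpoints)
        self.save_hyperparameters()
        self.l1 = torch.nn.Linear(in_features, num_classes)
        # enables trace-based export without passing example_inputs explicitly
        self.example_input_array = torch.randn(1, in_features)

    def forward(self, x):
        return torch.relu(self.l1(x))


model = TinyClassifier()
print(model.hparams.learning_rate)  # 0.001, collected automatically

scripted = model.to_torchscript()   # default method='script'
model.freeze()                      # requires_grad=False on all params + eval()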
import codecs
import operator
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Collection,
    Dict,
    Iterable,
    NoReturn,
    Tuple,
    Union,
)

from eth_typing import (
    HexStr,
)
from eth_utils.curried import (
    apply_formatter_at_index,
    apply_formatter_if,
    apply_formatter_to_array,
    apply_formatters_to_dict,
    apply_formatters_to_sequence,
    apply_one_of_formatters,
    is_0x_prefixed,
    is_address,
    is_bytes,
    is_dict,
    is_integer,
    is_null,
    is_string,
    remove_0x_prefix,
    text_if_str,
    to_checksum_address,
    to_list,
    to_tuple,
)
from eth_utils.toolz import (
    complement,
    compose,
    curried,
    curry,
    partial,
)
from hexbytes import (
    HexBytes,
)

from web3._utils.abi import (
    is_length,
)
from web3._utils.encoding import (
    hexstr_if_str,
    to_hex,
)
from web3._utils.filters import (
    BlockFilter,
    LogFilter,
    TransactionFilter,
)
from web3._utils.formatters import (
    hex_to_integer,
    integer_to_hex,
    is_array_of_dicts,
    is_array_of_strings,
    remove_key_if,
)
from web3._utils.normalizers import (
    abi_address_to_hex,
    abi_bytes_to_hex,
    abi_int_to_hex,
    abi_string_to_hex,
)
from web3._utils.rpc_abi import (
    RPC,
    RPC_ABIS,
    abi_request_formatters,
)
from web3.datastructures import (
    AttributeDict,
)
from web3.exceptions import (
    BlockNotFound,
    ContractLogicError,
    InvalidParityMode,
    TransactionNotFound,
)
from web3.types import (
    BlockIdentifier,
    CallOverrideParams,
    RPCEndpoint,
    RPCResponse,
    TReturn,
    TxParams,
    _Hash32,
)

if TYPE_CHECKING:
    from web3 import Web3  # noqa: F401
    from web3.module import Module  # noqa: F401
    from web3.eth import Eth  # noqa: F401


def bytes_to_ascii(value: bytes) -> str:
    return codecs.decode(value, 'ascii')


to_ascii_if_bytes = apply_formatter_if(is_bytes, bytes_to_ascii)
to_integer_if_hex = apply_formatter_if(is_string, hex_to_integer)
to_hex_if_integer = apply_formatter_if(is_integer, integer_to_hex)

is_false = partial(operator.is_, False)

is_not_false = complement(is_false)
is_not_null = complement(is_null)


@curry
def to_hexbytes(
    num_bytes: int, val: Union[str, int, bytes], variable_length: bool = False
) -> HexBytes:
    if isinstance(val, (str, int, bytes)):
        result = HexBytes(val)
    else:
        raise TypeError("Cannot convert %r to HexBytes" % val)

    extra_bytes = len(result) - num_bytes
    if extra_bytes == 0 or (variable_length and extra_bytes < 0):
        return result
    elif all(byte == 0 for byte in result[:extra_bytes]):
        return HexBytes(result[extra_bytes:])
    else:
        raise ValueError(
            "The value %r is %d bytes, but should be %d" % (
                result, len(result), num_bytes
            )
        )


def is_attrdict(val: Any) -> bool:
    return isinstance(val, AttributeDict)


not_attrdict = complement(is_attrdict)


def is_dict_but_not_attrdict(val: Any) -> bool:
    # `is_dict and not_attrdict` would evaluate to just `not_attrdict`, since
    # `and` between two callables returns the right-hand operand, so the two
    # predicates are combined explicitly here.
    return is_dict(val) and not_attrdict(val)


TRANSACTION_RESULT_FORMATTERS = {
    'blockHash': apply_formatter_if(is_not_null, to_hexbytes(32)),
    'blockNumber': apply_formatter_if(is_not_null, to_integer_if_hex),
    'transactionIndex': apply_formatter_if(is_not_null, to_integer_if_hex),
    'nonce': to_integer_if_hex,
    'gas': to_integer_if_hex,
    'gasPrice': to_integer_if_hex,
    'maxFeePerGas': to_integer_if_hex,
    'maxPriorityFeePerGas': to_integer_if_hex,
    'value': to_integer_if_hex,
    'from': to_checksum_address,
    'publicKey': apply_formatter_if(is_not_null, to_hexbytes(64)),
    'r': apply_formatter_if(is_not_null, to_hexbytes(32, variable_length=True)),
    'raw': HexBytes,
    's': apply_formatter_if(is_not_null, to_hexbytes(32, variable_length=True)),
    'to': apply_formatter_if(is_address, to_checksum_address),
    'hash': to_hexbytes(32),
    'v': apply_formatter_if(is_not_null, to_integer_if_hex),
    'standardV': apply_formatter_if(is_not_null, to_integer_if_hex),
}
transaction_result_formatter = apply_formatters_to_dict(TRANSACTION_RESULT_FORMATTERS)


def apply_list_to_array_formatter(formatter: Any) -> Callable[..., Any]:
    return to_list(apply_formatter_to_array(formatter))


LOG_ENTRY_FORMATTERS = {
    'blockHash': apply_formatter_if(is_not_null, to_hexbytes(32)),
    'blockNumber': apply_formatter_if(is_not_null, to_integer_if_hex),
    'transactionIndex': apply_formatter_if(is_not_null, to_integer_if_hex),
    'transactionHash': apply_formatter_if(is_not_null, to_hexbytes(32)),
    'logIndex': to_integer_if_hex,
    'address': to_checksum_address,
    'topics': apply_list_to_array_formatter(to_hexbytes(32)),
    'data': to_ascii_if_bytes,
}
log_entry_formatter = apply_formatters_to_dict(LOG_ENTRY_FORMATTERS)

RECEIPT_FORMATTERS = {
    'blockHash': apply_formatter_if(is_not_null, to_hexbytes(32)),
    'blockNumber': apply_formatter_if(is_not_null, to_integer_if_hex),
    'transactionIndex': apply_formatter_if(is_not_null, to_integer_if_hex),
    'transactionHash': to_hexbytes(32),
    'cumulativeGasUsed': to_integer_if_hex,
    'status': to_integer_if_hex,
    'gasUsed': to_integer_if_hex,
    'contractAddress': apply_formatter_if(is_not_null, to_checksum_address),
    'logs': apply_list_to_array_formatter(log_entry_formatter),
    'logsBloom': to_hexbytes(256),
    'from': apply_formatter_if(is_not_null, to_checksum_address),
    'to': apply_formatter_if(is_address, to_checksum_address),
    'effectiveGasPrice': to_integer_if_hex,
}
receipt_formatter = apply_formatters_to_dict(RECEIPT_FORMATTERS)

BLOCK_FORMATTERS = {
    'baseFeePerGas': to_integer_if_hex,
    'extraData': to_hexbytes(97, variable_length=True),
    'gasLimit': to_integer_if_hex,
    'gasUsed': to_integer_if_hex,
    'size': to_integer_if_hex,
    'timestamp': to_integer_if_hex,
    'hash': apply_formatter_if(is_not_null, to_hexbytes(32)),
    'logsBloom': apply_formatter_if(is_not_null, to_hexbytes(256)),
    'miner': apply_formatter_if(is_not_null, to_checksum_address),
    'mixHash': to_hexbytes(32),
    'nonce': apply_formatter_if(is_not_null, to_hexbytes(8, variable_length=True)),
    'number': apply_formatter_if(is_not_null, to_integer_if_hex),
    'parentHash': apply_formatter_if(is_not_null, to_hexbytes(32)),
    'sha3Uncles': apply_formatter_if(is_not_null, to_hexbytes(32)),
    'uncles': apply_list_to_array_formatter(to_hexbytes(32)),
    'difficulty': to_integer_if_hex,
    'receiptsRoot': to_hexbytes(32),
    'stateRoot': to_hexbytes(32),
    'totalDifficulty': to_integer_if_hex,
    'transactions': apply_one_of_formatters((
        (is_array_of_dicts, apply_list_to_array_formatter(transaction_result_formatter)),
        (is_array_of_strings, apply_list_to_array_formatter(to_hexbytes(32))),
    )),
    'transactionsRoot': to_hexbytes(32),
}
block_formatter = apply_formatters_to_dict(BLOCK_FORMATTERS)

SYNCING_FORMATTERS = {
    'startingBlock': to_integer_if_hex,
    'currentBlock': to_integer_if_hex,
    'highestBlock': to_integer_if_hex,
    'knownStates': to_integer_if_hex,
    'pulledStates': to_integer_if_hex,
}
syncing_formatter = apply_formatters_to_dict(SYNCING_FORMATTERS)

TRANSACTION_POOL_CONTENT_FORMATTERS = {
    'pending': compose(
        curried.keymap(to_ascii_if_bytes),
        curried.valmap(transaction_result_formatter),
    ),
    'queued': compose(
        curried.keymap(to_ascii_if_bytes),
        curried.valmap(transaction_result_formatter),
    ),
}
transaction_pool_content_formatter = apply_formatters_to_dict(
    TRANSACTION_POOL_CONTENT_FORMATTERS
)

TRANSACTION_POOL_INSPECT_FORMATTERS = {
    'pending': curried.keymap(to_ascii_if_bytes),
    'queued': curried.keymap(to_ascii_if_bytes),
}
transaction_pool_inspect_formatter = apply_formatters_to_dict(
    TRANSACTION_POOL_INSPECT_FORMATTERS
)

FEE_HISTORY_FORMATTERS = {
    'baseFeePerGas': apply_formatter_to_array(to_integer_if_hex),
    'gasUsedRatio': apply_formatter_if(is_not_null, apply_formatter_to_array(float)),
    'oldestBlock': to_integer_if_hex,
    'reward': apply_formatter_if(is_not_null, apply_formatter_to_array(
        apply_formatter_to_array(to_integer_if_hex))),
}
fee_history_formatter = apply_formatters_to_dict(FEE_HISTORY_FORMATTERS)

STORAGE_PROOF_FORMATTERS = {
    'key': HexBytes,
    'value': HexBytes,
    'proof': apply_list_to_array_formatter(HexBytes),
}

ACCOUNT_PROOF_FORMATTERS = {
    'address': to_checksum_address,
    'accountProof': apply_list_to_array_formatter(HexBytes),
    'balance': to_integer_if_hex,
    'codeHash': to_hexbytes(32),
    'nonce': to_integer_if_hex,
    'storageHash': to_hexbytes(32),
    'storageProof': apply_list_to_array_formatter(
        apply_formatters_to_dict(STORAGE_PROOF_FORMATTERS)
    )
}
proof_formatter = apply_formatters_to_dict(ACCOUNT_PROOF_FORMATTERS)

FILTER_PARAMS_FORMATTERS = {
    'fromBlock': apply_formatter_if(is_integer, integer_to_hex),
    'toBlock': apply_formatter_if(is_integer, integer_to_hex),
}
filter_params_formatter = apply_formatters_to_dict(FILTER_PARAMS_FORMATTERS)

filter_result_formatter = apply_one_of_formatters((
    (is_array_of_dicts, apply_list_to_array_formatter(log_entry_formatter)),
    (is_array_of_strings, apply_list_to_array_formatter(to_hexbytes(32))),
))

TRANSACTION_REQUEST_FORMATTERS = {
    'maxFeePerGas': to_hex_if_integer,
    'maxPriorityFeePerGas': to_hex_if_integer,
}
transaction_request_formatter = apply_formatters_to_dict(TRANSACTION_REQUEST_FORMATTERS)

transaction_param_formatter = compose(
    remove_key_if('to', lambda txn: txn['to'] in {'', b'', None}),
    remove_key_if('gasPrice', lambda txn: txn['gasPrice'] in {'', b'', None}),
    transaction_request_formatter,
)

call_without_override: Callable[
    [Tuple[TxParams, BlockIdentifier]],
    Tuple[Dict[str, Any], int]
]
call_without_override = apply_formatters_to_sequence([
    transaction_param_formatter,
    to_hex_if_integer,
])
call_with_override: Callable[
    [Tuple[TxParams, BlockIdentifier, CallOverrideParams]],
    Tuple[Dict[str, Any], int, Dict[str, Any]],
]
call_with_override = apply_formatters_to_sequence([
    transaction_param_formatter,
    to_hex_if_integer,
    lambda x: x,
])

estimate_gas_without_block_id: Callable[[Dict[str, Any]], Dict[str, Any]]
estimate_gas_without_block_id = apply_formatter_at_index(transaction_param_formatter, 0)
estimate_gas_with_block_id: Callable[
    [Tuple[Dict[str, Any], Union[str, int]]],
    Tuple[Dict[str, Any], int]
]
estimate_gas_with_block_id = apply_formatters_to_sequence([
    transaction_param_formatter,
    to_hex_if_integer,
])

SIGNED_TX_FORMATTER = {
    'raw': HexBytes,
    'tx': transaction_result_formatter,
}

signed_tx_formatter = apply_formatters_to_dict(SIGNED_TX_FORMATTER)

FILTER_PARAM_NORMALIZERS = apply_formatters_to_dict({
    'address': apply_formatter_if(is_string, lambda x: [x])
})

GETH_WALLET_FORMATTER = {
    'address': to_checksum_address
}

geth_wallet_formatter = apply_formatters_to_dict(GETH_WALLET_FORMATTER)

GETH_WALLETS_FORMATTER = {
    'accounts': apply_list_to_array_formatter(geth_wallet_formatter),
}

geth_wallets_formatter = apply_formatters_to_dict(GETH_WALLETS_FORMATTER)

PYTHONIC_REQUEST_FORMATTERS: Dict[RPCEndpoint, Callable[..., Any]] = {
    # Eth
    RPC.eth_feeHistory: compose(
        apply_formatter_at_index(to_hex_if_integer, 0),
        apply_formatter_at_index(to_hex_if_integer, 1)
    ),
    RPC.eth_getBalance: apply_formatter_at_index(to_hex_if_integer, 1),
    RPC.eth_getBlockByNumber: apply_formatter_at_index(to_hex_if_integer, 0),
    RPC.eth_getBlockTransactionCountByNumber: apply_formatter_at_index(
        to_hex_if_integer,
        0,
    ),
    RPC.eth_getCode: apply_formatter_at_index(to_hex_if_integer, 1),
    RPC.eth_getStorageAt: apply_formatter_at_index(to_hex_if_integer, 2),
    RPC.eth_getTransactionByBlockNumberAndIndex: compose(
        apply_formatter_at_index(to_hex_if_integer, 0),
        apply_formatter_at_index(to_hex_if_integer, 1),
    ),
    RPC.eth_getTransactionCount: apply_formatter_at_index(to_hex_if_integer, 1),
    RPC.eth_getRawTransactionByBlockNumberAndIndex: compose(
        apply_formatter_at_index(to_hex_if_integer, 0),
        apply_formatter_at_index(to_hex_if_integer, 1),
    ),
    RPC.eth_getRawTransactionByBlockHashAndIndex: apply_formatter_at_index(to_hex_if_integer, 1),
    RPC.eth_getUncleCountByBlockNumber: apply_formatter_at_index(to_hex_if_integer, 0),
    RPC.eth_getUncleByBlockNumberAndIndex: compose(
        apply_formatter_at_index(to_hex_if_integer, 0),
        apply_formatter_at_index(to_hex_if_integer, 1),
    ),
    RPC.eth_getUncleByBlockHashAndIndex: apply_formatter_at_index(to_hex_if_integer, 1),
    RPC.eth_newFilter: apply_formatter_at_index(filter_params_formatter, 0),
    RPC.eth_getLogs: apply_formatter_at_index(filter_params_formatter, 0),
    RPC.eth_call: apply_one_of_formatters((
        (is_length(2), call_without_override),
        (is_length(3), call_with_override),
    )),
    RPC.eth_estimateGas: apply_one_of_formatters((
        (is_length(1), estimate_gas_without_block_id),
        (is_length(2), estimate_gas_with_block_id),
    )),
    RPC.eth_sendTransaction: apply_formatter_at_index(transaction_param_formatter, 0),
    RPC.eth_signTransaction: apply_formatter_at_index(transaction_param_formatter, 0),
    RPC.eth_getProof: apply_formatter_at_index(to_hex_if_integer, 2),
    # personal
    RPC.personal_importRawKey: apply_formatter_at_index(
        compose(remove_0x_prefix, hexstr_if_str(to_hex)),
        0,
    ),
    RPC.personal_sign: apply_formatter_at_index(text_if_str(to_hex), 0),
    RPC.personal_ecRecover: apply_formatter_at_index(text_if_str(to_hex), 0),
    RPC.personal_sendTransaction: apply_formatter_at_index(transaction_param_formatter, 0),
    # Snapshot and Revert
    RPC.evm_revert: apply_formatter_at_index(integer_to_hex, 0),
    RPC.trace_replayBlockTransactions: apply_formatter_at_index(to_hex_if_integer, 0),
    RPC.trace_block: apply_formatter_at_index(to_hex_if_integer, 0),
    RPC.trace_call: compose(
        apply_formatter_at_index(transaction_param_formatter, 0),
        apply_formatter_at_index(to_hex_if_integer, 2)
    ),
}

PYTHONIC_RESULT_FORMATTERS: Dict[RPCEndpoint, Callable[..., Any]] = {
    # Eth
    RPC.eth_accounts: apply_list_to_array_formatter(to_checksum_address),
    RPC.eth_blockNumber: to_integer_if_hex,
    RPC.eth_chainId: to_integer_if_hex,
    RPC.eth_coinbase: to_checksum_address,
    RPC.eth_call: HexBytes,
    RPC.eth_estimateGas: to_integer_if_hex,
    RPC.eth_feeHistory: fee_history_formatter,
    RPC.eth_maxPriorityFeePerGas: to_integer_if_hex,
    RPC.eth_gasPrice: to_integer_if_hex,
    RPC.eth_getBalance: to_integer_if_hex,
    RPC.eth_getBlockByHash: apply_formatter_if(is_not_null, block_formatter),
    RPC.eth_getBlockByNumber: apply_formatter_if(is_not_null, block_formatter),
    RPC.eth_getBlockTransactionCountByHash: to_integer_if_hex,
    RPC.eth_getBlockTransactionCountByNumber: to_integer_if_hex,
    RPC.eth_getCode: HexBytes,
    RPC.eth_getFilterChanges: filter_result_formatter,
    RPC.eth_getFilterLogs: filter_result_formatter,
    RPC.eth_getLogs: filter_result_formatter,
    RPC.eth_getProof: apply_formatter_if(is_not_null, proof_formatter),
    RPC.eth_getRawTransactionByBlockHashAndIndex: HexBytes,
    RPC.eth_getRawTransactionByBlockNumberAndIndex: HexBytes,
    RPC.eth_getRawTransactionByHash: HexBytes,
    RPC.eth_getStorageAt: HexBytes,
    RPC.eth_getTransactionByBlockHashAndIndex: apply_formatter_if(
        is_not_null,
        transaction_result_formatter,
    ),
    RPC.eth_getTransactionByBlockNumberAndIndex: apply_formatter_if(
        is_not_null,
        transaction_result_formatter,
    ),
    RPC.eth_getTransactionByHash: apply_formatter_if(is_not_null, transaction_result_formatter),
    RPC.eth_getTransactionCount: to_integer_if_hex,
    RPC.eth_getTransactionReceipt: apply_formatter_if(
        is_not_null,
        receipt_formatter,
    ),
    RPC.eth_getUncleCountByBlockHash: to_integer_if_hex,
    RPC.eth_getUncleCountByBlockNumber: to_integer_if_hex,
    RPC.eth_hashrate: to_integer_if_hex,
    RPC.eth_protocolVersion: compose(
        apply_formatter_if(is_0x_prefixed, to_integer_if_hex),
        apply_formatter_if(is_integer, str),
    ),
    RPC.eth_sendRawTransaction: to_hexbytes(32),
    RPC.eth_sendTransaction: to_hexbytes(32),
    RPC.eth_sign: HexBytes,
    RPC.eth_signTransaction: apply_formatter_if(is_not_null, signed_tx_formatter),
    RPC.eth_signTypedData: HexBytes,
    RPC.eth_syncing: apply_formatter_if(is_not_false, syncing_formatter),
    # personal
    RPC.personal_importRawKey: to_checksum_address,
    RPC.personal_listAccounts: apply_list_to_array_formatter(to_checksum_address),
    RPC.personal_listWallets: apply_list_to_array_formatter(geth_wallets_formatter),
    RPC.personal_newAccount: to_checksum_address,
    RPC.personal_sendTransaction: to_hexbytes(32),
    RPC.personal_signTypedData: HexBytes,
    # Transaction Pool
    RPC.txpool_content: transaction_pool_content_formatter,
    RPC.txpool_inspect: transaction_pool_inspect_formatter,
    # Snapshot and Revert
    RPC.evm_snapshot: hex_to_integer,
    # Net
    RPC.net_peerCount: to_integer_if_hex,
}

ATTRDICT_FORMATTER = {
    '*': apply_formatter_if(is_dict_but_not_attrdict, AttributeDict.recursive)
}

METHOD_NORMALIZERS: Dict[RPCEndpoint, Callable[..., Any]] = {
    RPC.eth_getLogs: apply_formatter_at_index(FILTER_PARAM_NORMALIZERS, 0),
    RPC.eth_newFilter: apply_formatter_at_index(FILTER_PARAM_NORMALIZERS, 0)
}

STANDARD_NORMALIZERS = [
    abi_bytes_to_hex,
    abi_int_to_hex,
    abi_string_to_hex,
    abi_address_to_hex,
]

ABI_REQUEST_FORMATTERS = abi_request_formatters(STANDARD_NORMALIZERS, RPC_ABIS)


def raise_solidity_error_on_revert(response: RPCResponse) -> RPCResponse:
    """
    Reverts contain a `data` attribute with the following layout:
        "Reverted "
        Function selector for Error(string): 08c379a0 (4 bytes)
        Data offset: 32 (32 bytes)
        String length (32 bytes)
        Reason string (padded, use string length from above to get meaningful part)

    See also https://solidity.readthedocs.io/en/v0.6.3/control-structures.html#revert
    """
    if not isinstance(response['error'], dict):
        raise ValueError('Error expected to be a dict')

    data = response['error'].get('data', '')

    # Ganache case:
    if isinstance(data, dict) and response['error'].get('message'):
        raise ContractLogicError(f'execution reverted: {response["error"]["message"]}')

    # Parity/OpenEthereum case:
    if data.startswith('Reverted '):
        # "Reverted", function selector and offset are always the same for revert errors
        prefix = 'Reverted 0x08c379a00000000000000000000000000000000000000000000000000000000000000020'  # noqa: E501
        if not data.startswith(prefix):
            raise ContractLogicError('execution reverted')

        reason_length = int(data[len(prefix):len(prefix) + 64], 16)
        reason = data[len(prefix) + 64:len(prefix) + 64 + reason_length * 2]
        raise ContractLogicError(f'execution reverted: {bytes.fromhex(reason).decode("utf8")}')

    # Geth case:
    if 'message' in response['error'] and response['error'].get('code', '') == 3:
        raise ContractLogicError(response['error']['message'])

    # Geth Revert without error message case:
    if 'execution reverted' in response['error'].get('message', ''):
        raise ContractLogicError('execution reverted')

    return response


def raise_invalid_parity_mode(response: RPCResponse) -> NoReturn:
    # eth-tester sends back an invalid RPCError, which makes mypy complain
    error_message = response['error'].get('message')  # type: ignore
    raise InvalidParityMode(error_message)


ERROR_FORMATTERS: Dict[RPCEndpoint, Callable[..., Any]] = {
    RPC.eth_estimateGas: raise_solidity_error_on_revert,
    RPC.eth_call: raise_solidity_error_on_revert,
    RPC.parity_setMode: raise_invalid_parity_mode,
}


@to_tuple
def combine_formatters(
    formatter_maps: Collection[Dict[RPCEndpoint, Callable[..., TReturn]]],
    method_name: RPCEndpoint
) -> Iterable[Callable[..., TReturn]]:
    for formatter_map in formatter_maps:
        if method_name in formatter_map:
            yield formatter_map[method_name]


def get_request_formatters(
    method_name: Union[RPCEndpoint, Callable[..., RPCEndpoint]]
) -> Dict[str, Callable[..., Any]]:
    request_formatter_maps = (
        ABI_REQUEST_FORMATTERS,
        # METHOD_NORMALIZERS needs to be after ABI_REQUEST_FORMATTERS
        # so that eth_getLogs's apply_formatter_at_index formatter
        # is applied to the whole address
        # rather than on the first byte of the address
        METHOD_NORMALIZERS,
        PYTHONIC_REQUEST_FORMATTERS,
    )
    formatters = combine_formatters(request_formatter_maps, method_name)
    return compose(*formatters)


def raise_block_not_found(params: Tuple[BlockIdentifier, bool]) -> NoReturn:
    try:
        block_identifier = params[0]
        message = f"Block with id: {block_identifier!r} not found."
    except IndexError:
        message = "Unknown block identifier"
    raise BlockNotFound(message)


def raise_block_not_found_for_uncle_at_index(
    params: Tuple[BlockIdentifier, Union[HexStr, int]]
) -> NoReturn:
    try:
        block_identifier = params[0]
        uncle_index = to_integer_if_hex(params[1])
        message = (
            f"Uncle at index: {uncle_index} of block with id: "
            f"{block_identifier!r} not found."
        )
    except IndexError:
        message = "Unknown block identifier or uncle index"
    raise BlockNotFound(message)


def raise_transaction_not_found(params: Tuple[_Hash32]) -> NoReturn:
    try:
        transaction_hash = params[0]
        message = f"Transaction with hash: {transaction_hash!r} not found."
    except IndexError:
        message = "Unknown transaction hash"
    raise TransactionNotFound(message)


def raise_transaction_not_found_with_index(params: Tuple[BlockIdentifier, int]) -> NoReturn:
    try:
        block_identifier = params[0]
        transaction_index = to_integer_if_hex(params[1])
        message = (
            f"Transaction index: {transaction_index} "
            f"on block id: {block_identifier!r} not found."
        )
    except IndexError:
        message = "Unknown transaction index or block identifier"
    raise TransactionNotFound(message)


NULL_RESULT_FORMATTERS: Dict[RPCEndpoint, Callable[..., Any]] = {
    RPC.eth_getBlockByHash: raise_block_not_found,
    RPC.eth_getBlockByNumber: raise_block_not_found,
    RPC.eth_getBlockTransactionCountByHash: raise_block_not_found,
    RPC.eth_getBlockTransactionCountByNumber: raise_block_not_found,
    RPC.eth_getUncleCountByBlockHash: raise_block_not_found,
    RPC.eth_getUncleCountByBlockNumber: raise_block_not_found,
    RPC.eth_getUncleByBlockHashAndIndex: raise_block_not_found_for_uncle_at_index,
    RPC.eth_getUncleByBlockNumberAndIndex: raise_block_not_found_for_uncle_at_index,
    RPC.eth_getTransactionByHash: raise_transaction_not_found,
    RPC.eth_getTransactionByBlockHashAndIndex: raise_transaction_not_found_with_index,
    RPC.eth_getTransactionByBlockNumberAndIndex: raise_transaction_not_found_with_index,
    RPC.eth_getTransactionReceipt: raise_transaction_not_found,
    RPC.eth_getRawTransactionByBlockHashAndIndex: raise_transaction_not_found_with_index,
    RPC.eth_getRawTransactionByBlockNumberAndIndex: raise_transaction_not_found_with_index,
    RPC.eth_getRawTransactionByHash: raise_transaction_not_found,
}


def filter_wrapper(
    module: "Eth",
    method: RPCEndpoint,
    filter_id: HexStr,
) -> Union[BlockFilter, TransactionFilter, LogFilter]:
    if method == RPC.eth_newBlockFilter:
        return BlockFilter(filter_id, eth_module=module)
    elif method == RPC.eth_newPendingTransactionFilter:
        return TransactionFilter(filter_id, eth_module=module)
    elif method == RPC.eth_newFilter:
        return LogFilter(filter_id, eth_module=module)
    else:
        raise NotImplementedError('Filter wrapper needs to be used with either '
                                  f'{RPC.eth_newBlockFilter}, {RPC.eth_newPendingTransactionFilter}'
                                  f' or {RPC.eth_newFilter}')


FILTER_RESULT_FORMATTERS: Dict[RPCEndpoint, Callable[..., Any]] = {
    RPC.eth_newPendingTransactionFilter: filter_wrapper,
    RPC.eth_newBlockFilter: filter_wrapper,
    RPC.eth_newFilter: filter_wrapper,
}


@to_tuple
def apply_module_to_formatters(
    formatters: Tuple[Callable[..., TReturn]],
    module: "Module",
    method_name: Union[RPCEndpoint, Callable[..., RPCEndpoint]],
) -> Iterable[Callable[..., TReturn]]:
    for f in formatters:
        yield partial(f, module, method_name)


def get_result_formatters(
    method_name: Union[RPCEndpoint, Callable[..., RPCEndpoint]],
    module: "Module",
) -> Dict[str, Callable[..., Any]]:
    formatters = combine_formatters(
        (PYTHONIC_RESULT_FORMATTERS,),
        method_name
    )
    formatters_requiring_module = combine_formatters(
        (FILTER_RESULT_FORMATTERS,),
        method_name
    )

    partial_formatters = apply_module_to_formatters(
        formatters_requiring_module,
        module,
        method_name
    )
    attrdict_formatter = apply_formatter_if(is_dict_but_not_attrdict, AttributeDict.recursive)

    return compose(*partial_formatters, attrdict_formatter, *formatters)


def get_error_formatters(
    method_name: Union[RPCEndpoint, Callable[..., RPCEndpoint]]
) -> Callable[..., Any]:
    # Note error formatters work on the full response dict
    error_formatter_maps = (ERROR_FORMATTERS,)
    formatters = combine_formatters(error_formatter_maps, method_name)

    return compose(*formatters)


def get_null_result_formatters(
    method_name: Union[RPCEndpoint, Callable[..., RPCEndpoint]]
) -> Callable[..., Any]:
    formatters = combine_formatters((NULL_RESULT_FORMATTERS,), method_name)

    return compose(*formatters)
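To make the Error(string) revert layout concrete, here is a small self-contained sketch that builds a Parity/OpenEthereum-style "Reverted ..." payload and decodes it the same way raise_solidity_error_on_revert does (the reason string is illustrative):

# Build an Error("Not enough Ether") payload, then recover the reason string.
reason_text = "Not enough Ether"
reason_hex = reason_text.encode("utf8").hex()

payload = (
    "Reverted 0x08c379a0"                       # Error(string) function selector
    + hex(32)[2:].rjust(64, "0")                # data offset: 32
    + hex(len(reason_text))[2:].rjust(64, "0")  # string length
    + reason_hex.ljust(64, "0")                 # reason, right-padded to a word
)

# Selector and offset are constant, so they form the fixed prefix.
prefix = "Reverted 0x08c379a0" + hex(32)[2:].rjust(64, "0")
reason_length = int(payload[len(prefix):len(prefix) + 64], 16)
reason = payload[len(prefix) + 64:len(prefix) + 64 + reason_length * 2]
assert bytes.fromhex(reason).decode("utf8") == reason_text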
""" This sample demonstrates retrieving sensor versions by hostname """ # pylint: disable=C0103,W0621,E0401 import json from falconpy.hosts import Hosts def device_list(offset: int, limit: int): """ I return a list of all devices for the CID, if I max out on the query limit, I can paginate """ result = falcon.QueryDevicesByFilter(parameters={"limit": limit, "offset": offset}) new_offset = result["body"]["meta"]["pagination"]["offset"] total = result["body"]["meta"]["pagination"]["total"] device_list = result["body"]["resources"] return new_offset, total, device_list def device_detail(aids: list): """ I return the device_id and agent_version for a list of AIDs I'm provided """ result = falcon.GetDeviceDetails(ids=aids) devices = [] # return just the aid and agent version for device in result["body"]["resources"]: res = {} res["hostname"] = device.get("hostname", None) res["agent_version"] = device.get("agent_version", None) devices.append(res) return devices # Grab our config parameters from a local file. with open('../config.json', 'r') as file_config: config = json.loads(file_config.read()) falcon = Hosts(creds={ "client_id": config["falcon_client_id"], "client_secret": config["falcon_client_secret"] }, # base_url = "https://YOUR_BASE_URL.crowdstrike.com" # Enter your base URL here if it is not US-1 ) offset = 0 # Start at the beginning displayed = 0 # This is just so we can show a running count total = 1 # Assume there is at least one limit = 500 # Quick limit to prove pagination while offset < total: offset, total, devices = device_list(offset, limit) details = device_detail(devices) for detail in details: displayed += 1 print(f"{displayed}: {detail["hostname"]} is on version {detail["agent_version"]}")
""" This sample demonstrates retrieving sensor versions by hostname """ # pylint: disable=C0103,W0621,E0401 import json from falconpy.hosts import Hosts def device_list(offset: int, limit: int): """ I return a list of all devices for the CID, if I max out on the query limit, I can paginate """ result = falcon.QueryDevicesByFilter(parameters={"limit": limit, "offset": offset}) new_offset = result["body"]["meta"]["pagination"]["offset"] total = result["body"]["meta"]["pagination"]["total"] device_list = result["body"]["resources"] return new_offset, total, device_list def device_detail(aids: list): """ I return the device_id and agent_version for a list of AIDs I'm provided """ result = falcon.GetDeviceDetails(ids=aids) devices = [] # return just the aid and agent version for device in result["body"]["resources"]: res = {} res["hostname"] = device.get("hostname", None) res["agent_version"] = device.get("agent_version", None) devices.append(res) return devices # Grab our config parameters from a local file. with open('../config.json', 'r') as file_config: config = json.loads(file_config.read()) falcon = Hosts(creds={ "client_id": config["falcon_client_id"], "client_secret": config["falcon_client_secret"] }, # base_url = "https://YOUR_BASE_URL.crowdstrike.com" # Enter your base URL here if it is not US-1 ) offset = 0 # Start at the beginning displayed = 0 # This is just so we can show a running count total = 1 # Assume there is at least one limit = 500 # Quick limit to prove pagination while offset < total: offset, total, devices = device_list(offset, limit) details = device_detail(devices) for detail in details: displayed += 1 print(f"{displayed}: {detail['hostname']} is on version {detail['agent_version']}")
pergunta = {
    'pergunta 1': {
        'pergunta': 'how much is 2+2? ',  # plain string (the original wrapped each question in a set literal)
        'respostas': {'a': '1', 'b': '4', 'c': '5'},
        'resposta_certa': 'b'
    },
    'pergunta 2': {
        'pergunta': 'how much is 7x2? ',
        'respostas': {'a': '15', 'b': '10', 'c': '14'},
        'resposta_certa': 'c'
    },
    'pergunta 3': {
        'pergunta': 'how much is 50-40? ',
        'respostas': {'a': -10, 'b': 15, 'c': 10},
        'resposta_certa': 'c'
    },
}

resp_certa = 0
for chave_p, chave_r in pergunta.items():
    print(f'\n{chave_p}: {chave_r['pergunta']}')
    print('Choose an answer')
    for resp_k, resp_value in chave_r['respostas'].items():
        print(f'[{resp_k}]: {resp_value}')
    resposta_user = input('Your answer: ')
    if resposta_user == chave_r['resposta_certa']:
        print(10 * '====')
        print('Correct answer')
        print(10 * '====')
        resp_certa += 1
    else:
        print(10 * '====')
        print('Wrong answer!!')
        print(10 * '====')

qtd_perguntas = len(pergunta)
porcentagem_acerto = resp_certa / qtd_perguntas * 100
print('Percentage correct: {:.2f}%'.format(porcentagem_acerto))
pergunta = {
    'pergunta 1': {
        'pergunta': 'how much is 2+2? ',  # plain string (the original wrapped each question in a set literal)
        'respostas': {'a': '1', 'b': '4', 'c': '5'},
        'resposta_certa': 'b'
    },
    'pergunta 2': {
        'pergunta': 'how much is 7x2? ',
        'respostas': {'a': '15', 'b': '10', 'c': '14'},
        'resposta_certa': 'c'
    },
    'pergunta 3': {
        'pergunta': 'how much is 50-40? ',
        'respostas': {'a': -10, 'b': 15, 'c': 10},
        'resposta_certa': 'c'
    },
}

resp_certa = 0
for chave_p, chave_r in pergunta.items():
    print(f'\n{chave_p}: {chave_r["pergunta"]}')
    print('Choose an answer')
    for resp_k, resp_value in chave_r['respostas'].items():
        print(f'[{resp_k}]: {resp_value}')
    resposta_user = input('Your answer: ')
    if resposta_user == chave_r['resposta_certa']:
        print(10 * '====')
        print('Correct answer')
        print(10 * '====')
        resp_certa += 1
    else:
        print(10 * '====')
        print('Wrong answer!!')
        print(10 * '====')

qtd_perguntas = len(pergunta)
porcentagem_acerto = resp_certa / qtd_perguntas * 100
print('Percentage correct: {:.2f}%'.format(porcentagem_acerto))
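
# --- Hedged sketch (my addition, not from the original quiz) -----------------
# As written, any input that is not a listed option key silently counts as a
# wrong answer. A stricter prompt could re-ask until the input is valid:
def perguntar(opcoes: dict) -> str:
    """Keep prompting until the user types one of the listed option keys."""
    while True:
        resposta = input('Your answer: ').strip().lower()
        if resposta in opcoes:
            return resposta
        print('Invalid option, choose one of:', ', '.join(opcoes))

# Hypothetical usage inside the loop above:
#     resposta_user = perguntar(chave_r['respostas'])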
# This app is for educational purposes only. Insights gained are not financial advice. Use at your own risk!
import streamlit as st
from PIL import Image
import pandas as pd
import base64
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
import requests
import json
import time
import tweepy
import datetime
from datetime import datetime, date, time
import plotly.express as px
import numpy as np
from wordcloud import WordCloud, STOPWORDS
import config
import torch
from transformers import pipeline
from hate_speech_model import HateSpeechClassifier

#---------------------------------#
# New feature (make sure to upgrade your streamlit library)
# pip install --upgrade streamlit

#---------------------------------#
# Page layout
## Page expands to full width
st.set_page_config(layout="wide")

#---------------------------------#
# Title
image = Image.open('4.PNG')
st.image(image, width=None)

df = pd.read_csv('data/final_hatespeech_data - final_hatespeech_data.csv')
df['label'] = np.where(df['label'] == 1, 'Hate speech', 'Normal speech')

# Page layout (continued)
## Divide page to 3 columns (col1 = sidebar, col2 and col3 = page contents)
col1 = st.sidebar
col2, col3 = st.beta_columns((2, 1))

#---------------------------------#
# Sidebar - Main panel
col1.header('Select options')

## Sidebar - Region selection
locations = ['ELDORET', 'EMBU', 'GARISSA', 'GITHUNGURI', 'HOMA BAY', 'ISINYA', 'ISIOLO', 'JUJA',
             'KABARAK', 'KABETE', 'KAJIADO', 'KAKAMEGA', 'KAPSABET', 'NAIROBI', 'KERICHO', 'KIAMBU']
region = pd.DataFrame(locations)
selected_region = col1.selectbox('Select region', region)

## Sidebar - Start and End date
start_date = col1.date_input('Start date', min_value=datetime(2021, 4, 1), max_value=datetime(2021, 4, 29))
start_date = pd.to_datetime(start_date)
end_date = col1.date_input('End date', min_value=datetime(2021, 4, 1), max_value=datetime(2021, 4, 29))
end_date = pd.to_datetime(end_date)
#date_range = col1.date_input('Date Range', value=(datetime(2020, 1, 1), datetime(2030, 1, 1)), help="choose a range or click same day twice")

#st.title('Twitter hatespeech detection tool')
st.markdown("""
This tool classifies tweets as **hate speech or non-hatespeech**!
""")

#---------------------------------#
# About
expander_bar_1 = st.beta_expander("About this tool")
expander_bar_1.markdown("""
In an increasingly digital era where online social interactions are considered part of the social context,
it is proving inevitable that machine learning should be used to protect people from harmful content.
This has been evidenced by the multitude of instances where hate speech propagated online has led to
physical injury and loss of lives across the world. Government institutions should now consider online
interactions as spaces where potential crimes may occur, just as in the physical world.

This tool identifies hate speech as tweets that fall into the following three formal classes:
* **HATE:** This class contains tweets which highlight negative attributes or deficiencies of certain groups of individuals. This class includes hateful comments towards individuals based on race, political opinion, sexual orientation, gender, social status, health condition, etc.
* **OFFN:** This class contains tweets which are degrading, dehumanizing or insulting towards an individual. It encompasses cases of threatening with violent acts.
* **PRFN:** This class contains tweets with explicit content, profane words or unacceptable language in the absence of insults and abuse. This typically concerns the usage of swearwords and cursing.

Political hate speech is the greatest area of concern in regard to Kenya and thus will be the area of focus for this tool.
""")

#---------------------------------#
# Scraping of tweets
expander_bar_2 = st.beta_expander("Search/load tweets")

#@st.cache
# Load classification model
with st.spinner('Loading...'):
    model = HateSpeechClassifier()
    model.load_state_dict(torch.load(config.MODEL_PATH, map_location=torch.device('cpu')))

# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")


@st.cache(allow_output_mutation=True)
def sentence_prediction(tw, model):
    tokenizer = config.TOKENIZER
    max_len = 140
    review = str(tw)
    inputs = tokenizer.encode_plus(
        review,
        None,
        add_special_tokens=True,
        max_length=max_len,
        return_token_type_ids=False,
        truncation=True,
        padding="max_length"
    )
    class_names = ['Normal Speech', 'Hate Speech']
    input_ids = inputs['input_ids']
    mask = inputs['attention_mask']
    padding_length = max_len - len(input_ids)
    input_ids = input_ids + ([0] * padding_length)
    mask = mask + ([0] * padding_length)
    input_ids = torch.tensor(input_ids, dtype=torch.long).unsqueeze(0)
    attention_mask = torch.tensor(mask, dtype=torch.long).unsqueeze(0)
    input_ids = input_ids.to(device, dtype=torch.long)
    attention_mask = attention_mask.to(device, dtype=torch.long)
    outputs = model(input_ids=input_ids, attention_mask=attention_mask)
    outputs = torch.sigmoid(outputs).cpu().detach().numpy()
    out = outputs[0][0]
    hate_prediction = float(out)
    if hate_prediction >= 0.5:
        return f"{class_names[1]}"
    else:
        return f"{class_names[0]}"


### SINGLE TWEET CLASSIFICATION ###
expander_bar_2.subheader('Single tweet classification')

# Get sentence input, preprocess it, and convert to flair.data.Sentence format
tw = expander_bar_2.text_input('Tweet:')

if tw != '':
    # Predict tweet
    sentence = sentence_prediction(tw, model)
    # Show prediction
    #with st.spinner('Predicting...'):
    #    sentence
    if sentence == "Hate Speech":
        zero_model = 'typeform/mobilebert-uncased-mnli'
        classifier = pipeline("zero-shot-classification", model=zero_model, tokenizer=config.TOKENIZER)
        text = tw
        candidate_labels = ['Violent', 'Offensive', 'Profane']
        result = classifier(text, candidate_labels)
        data = pd.DataFrame({'Hate Sub-clusters': result['labels'], 'Confidence Level': result['scores']})
        clus = data[data['Confidence Level'] == data['Confidence Level'].max()]
        clus_p = clus['Hate Sub-clusters'].values
        clus_pp = clus_p[0]
        clus_c = clus['Confidence Level'].values
        clus_cc = round(clus_c[0], 2)
        #print('hate sub-cluster: ', clus_pp, ' with a Confidence Level of ', clus_cc)
        #f"{"hate sub-cluster": clus_pp,"Confidence Level": clus_cc}"
        with st.spinner('Predicting...'):
            speech = f"**{sentence}**"
            subclust = f"**Hate sub-cluster: {clus_pp} with a Confidence Level of {clus_cc}**"
            #st.markdown(speech)
            expander_bar_2.write(speech)
            #st.markdown(subclust)
            expander_bar_2.write(subclust)
    else:
        with st.spinner('Predicting...'):
            speech = f"**{sentence}**"
            #st.markdown(speech)
            expander_bar_2.write(speech)

    #st.write(alt.Chart(data).mark_bar().encode(
    #    x='Confidence Level',
    #    y=alt.X('Hate Sub-clusters', sort=None),
    #    color='Hate Sub-clusters'
    #).configure_axis(
    #    grid=False
    #).properties(
    #    width=500,
    #    height=150
    #)
    #)
    # st.write(out)

### TWEET SEARCH AND CLASSIFY ###
expander_bar_2.subheader('Offline Batch tweet classification')

# Initialize empty dataframe
tweet_data = pd.DataFrame({
    'tweet': [],
    'predicted-sentiment': [],
    'location': [],
    'tweet_date': []
})

uploaded_file = expander_bar_2.file_uploader("Choose a file")
if uploaded_file is not None:
    df = pd.read_csv(uploaded_file)
    expander_bar_2.write(df)
    # classify tweet
    #for tweet in df['tweet']:
    for index, row in df.iterrows():
        # Skip iteration if tweet is empty
        #if tweet in ('', ' '):
        if row['tweet'] in ('', ' '):
            continue
        # Make predictions
        class_names = ['Hate Speech', 'Normal Speech']
        sentence = sentence_prediction(row['tweet'], model)  # classifier.predict(sentence)
        sentiment = sentence
        max_len = 140
        if sentiment == "Hate Speech":
            #tokenizer = AutoTokenizer.from_pretrained('typeform/mobilebert-uncased-mnli')
            zero_model = 'typeform/mobilebert-uncased-mnli'
            classifier = pipeline("zero-shot-classification", model=zero_model, tokenizer=config.TOKENIZER)
            text = row['tweet']
            candidate_labels = ['Violent', 'Offensive', 'Profane']
            result = classifier(text, candidate_labels)
            data = pd.DataFrame({'Hate Sub-clusters': result['labels'], 'Confidence Level': result['scores']})
            clus = data[data['Confidence Level'] == data['Confidence Level'].max()]
            clus_p = clus['Hate Sub-clusters'].values
            clus_pp = clus_p[0]
            clus_c = clus['Confidence Level'].values
            clus_cc = round(clus_c[0], 2)
            #tweet_data = tweet_data.append({'tweet': tweet, 'predicted-sentiment': sentiment,
            #                                'hate sub-cluster': clus_pp, 'confidence level': clus_cc},
            #                               ignore_index=True)
            tweet_data = tweet_data.append(
                {'tweet': row['tweet'], 'predicted-sentiment': sentiment, 'hate sub-cluster': clus_pp,
                 'confidence level': clus_cc, 'location': row['location'], 'tweet_date': row['tweet_date']},
                ignore_index=True)
            #tweet_data = tweet_data.reindex(
            #    columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level'])
            tweet_data = tweet_data.reindex(
                columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level',
                         'location', 'tweet_date'])
        else:
            non = ''
            #tweet_data = tweet_data.append(
            #    {'tweet': tweet, 'predicted-sentiment': sentiment, 'hate sub-cluster': non,
            #     'confidence level': non}, ignore_index=True)
            tweet_data = tweet_data.append(
                {'tweet': row['tweet'], 'predicted-sentiment': sentiment, 'hate sub-cluster': non,
                 'confidence level': non, 'location': row['location'], 'tweet_date': row['tweet_date']},
                ignore_index=True)
            tweet_data = tweet_data.reindex(
                columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level',
                         'location', 'tweet_date'])
            #columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level'])

# As long as the query is valid (not empty or equal to '#')...
#if query != '' and query != '#':
#    with st.spinner(f'Searching for and analyzing {query}...'):

# Show query data and sentiment if available
try:
    #expander_bar_2.write(tweet_data)
    tweet_data.to_csv("predicted_tweet_data")
    tweet_data['tweet_date'] = pd.to_datetime(tweet_data['tweet_date'])
    tweet_data_filtered = tweet_data[
        (tweet_data['location'] == selected_region) & (tweet_data['tweet_date'] >= start_date) &
        (tweet_data['tweet_date'] <= end_date)]
    expander_bar_2.write(tweet_data_filtered)
except NameError:
    # if no queries have been made yet
    pass

#---------------------------------#
# Overview of extracted tweets
tweet_data['tweet_date'] = pd.to_datetime(tweet_data['tweet_date'])
tweet_data_filtered = tweet_data[(tweet_data['location'] == selected_region) &
                                 (tweet_data['tweet_date'] >= start_date) &
                                 (tweet_data['tweet_date'] <= end_date)]

expander_bar_3 = st.beta_expander("Visual overview of loaded tweets")
sentiment_count = tweet_data_filtered['predicted-sentiment'].value_counts()
sentiment_count = pd.DataFrame({'Sentiments': sentiment_count.index, 'Tweets': sentiment_count.values})
# region_count = df['location'].value_counts()
# region_count = pd.DataFrame({'Region': region_count.index, 'Tweets': region_count.values})
if len(sentiment_count) == 0:
    expander_bar_3.markdown('There are no visuals at the moment... Please load data to show some visuals')
else:
    fig_1 = px.bar(sentiment_count, x='Sentiments', y='Tweets', color='Tweets', height=500)
    expander_bar_3.plotly_chart(fig_1)
    fig_2 = px.pie(sentiment_count, values='Tweets', names='Sentiments')
    expander_bar_3.plotly_chart(fig_2)
    # fig_3 = px.bar(region_count, x='Region', y='Tweets', color='Tweets', height=500)
    # expander_bar_3.plotly_chart(fig_3)
    #expander_bar_3.table()

#---------------------------------#
# Hate speech tweets
expander_bar_3 = st.beta_expander("View hatespeech tweets")
df_hatespeech = tweet_data_filtered[tweet_data_filtered['predicted-sentiment'] == 'Hate Speech']
if len(df_hatespeech) == 0:
    expander_bar_3.markdown('Nothing to show here since hate speech has not been detected in the set of uploaded tweets')
else:
    expander_bar_3.dataframe(df_hatespeech[['tweet', 'predicted-sentiment']])

#---------------------------------#
# Non-hatespeech tweets
expander_bar_4 = st.beta_expander("View normal text tweets")
df_normalspeech = tweet_data_filtered[tweet_data_filtered['predicted-sentiment'] == 'Normal Speech']
if len(df_normalspeech) == 0:
    expander_bar_4.markdown('Nothing to show here since normal speech has not been detected in the set of uploaded tweets')
else:
    expander_bar_4.dataframe(df_normalspeech[['tweet', 'predicted-sentiment']])

#---------------------------------#
# Hate speech words
st.set_option('deprecation.showPyplotGlobalUse', False)
expander_bar_5 = st.beta_expander("Hate speech key words")
if len(df_hatespeech) == 0:
    expander_bar_5.markdown('Nothing to show here since hate speech has not been detected in the set of uploaded tweets')
else:
    words = " ".join(df_hatespeech["tweet"])
    processed_words = " ".join([word for word in words.split()
                                if "http" not in word and not word.startswith("@") and word != "RT"])
    wordcloud = WordCloud(stopwords=STOPWORDS, background_color="white",
                          width=800, height=640).generate(processed_words)
    plt.imshow(wordcloud)
    plt.xticks([])
    plt.yticks([])
    expander_bar_5.pyplot()
#---------------------------------#
# This app is for educational purposes only. Insights gained are not financial advice. Use at your own risk!
import streamlit as st
from PIL import Image
import pandas as pd
import base64
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
import requests
import json
import time
import tweepy
import datetime
from datetime import datetime, date, time
import plotly.express as px
import numpy as np
from wordcloud import WordCloud, STOPWORDS
import config
import torch
from transformers import pipeline
from hate_speech_model import HateSpeechClassifier

#---------------------------------#
# New feature (make sure to upgrade your streamlit library)
# pip install --upgrade streamlit

#---------------------------------#
# Page layout
## Page expands to full width
st.set_page_config(layout="wide")

#---------------------------------#
# Title
image = Image.open('4.PNG')
st.image(image, width=None)

df = pd.read_csv('data/final_hatespeech_data - final_hatespeech_data.csv')
df['label'] = np.where(df['label'] == 1, 'Hate speech', 'Normal speech')

# Page layout (continued)
## Divide page to 3 columns (col1 = sidebar, col2 and col3 = page contents)
col1 = st.sidebar
col2, col3 = st.beta_columns((2, 1))

#---------------------------------#
# Sidebar - Main panel
col1.header('Select options')

## Sidebar - Region selection
locations = ['ELDORET', 'EMBU', 'GARISSA', 'GITHUNGURI', 'HOMA BAY', 'ISINYA', 'ISIOLO', 'JUJA',
             'KABARAK', 'KABETE', 'KAJIADO', 'KAKAMEGA', 'KAPSABET', 'NAIROBI', 'KERICHO', 'KIAMBU']
region = pd.DataFrame(locations)
selected_region = col1.selectbox('Select region', region)

## Sidebar - Start and End date
start_date = col1.date_input('Start date', min_value=datetime(2021, 4, 1), max_value=datetime(2021, 4, 29))
start_date = pd.to_datetime(start_date)
end_date = col1.date_input('End date', min_value=datetime(2021, 4, 1), max_value=datetime(2021, 4, 29))
end_date = pd.to_datetime(end_date)
#date_range = col1.date_input('Date Range', value=(datetime(2020, 1, 1), datetime(2030, 1, 1)), help="choose a range or click same day twice")

#st.title('Twitter hatespeech detection tool')
st.markdown("""
This tool classifies tweets as **hate speech or non-hatespeech**!
""")

#---------------------------------#
# About
expander_bar_1 = st.beta_expander("About this tool")
expander_bar_1.markdown("""
In an increasingly digital era where online social interactions are considered part of the social context,
it is proving inevitable that machine learning should be used to protect people from harmful content.
This has been evidenced by the multitude of instances where hate speech propagated online has led to
physical injury and loss of lives across the world. Government institutions should now consider online
interactions as spaces where potential crimes may occur, just as in the physical world.

This tool identifies hate speech as tweets that fall into the following three formal classes:
* **HATE:** This class contains tweets which highlight negative attributes or deficiencies of certain groups of individuals. This class includes hateful comments towards individuals based on race, political opinion, sexual orientation, gender, social status, health condition, etc.
* **OFFN:** This class contains tweets which are degrading, dehumanizing or insulting towards an individual. It encompasses cases of threatening with violent acts.
* **PRFN:** This class contains tweets with explicit content, profane words or unacceptable language in the absence of insults and abuse. This typically concerns the usage of swearwords and cursing.

Political hate speech is the greatest area of concern in regard to Kenya and thus will be the area of focus for this tool.
""")

#---------------------------------#
# Scraping of tweets
expander_bar_2 = st.beta_expander("Search/load tweets")

#@st.cache
# Load classification model
with st.spinner('Loading...'):
    model = HateSpeechClassifier()
    model.load_state_dict(torch.load(config.MODEL_PATH, map_location=torch.device('cpu')))

# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")


@st.cache(allow_output_mutation=True)
def sentence_prediction(tw, model):
    tokenizer = config.TOKENIZER
    max_len = 140
    review = str(tw)
    inputs = tokenizer.encode_plus(
        review,
        None,
        add_special_tokens=True,
        max_length=max_len,
        return_token_type_ids=False,
        truncation=True,
        padding="max_length"
    )
    class_names = ['Normal Speech', 'Hate Speech']
    input_ids = inputs['input_ids']
    mask = inputs['attention_mask']
    padding_length = max_len - len(input_ids)
    input_ids = input_ids + ([0] * padding_length)
    mask = mask + ([0] * padding_length)
    input_ids = torch.tensor(input_ids, dtype=torch.long).unsqueeze(0)
    attention_mask = torch.tensor(mask, dtype=torch.long).unsqueeze(0)
    input_ids = input_ids.to(device, dtype=torch.long)
    attention_mask = attention_mask.to(device, dtype=torch.long)
    outputs = model(input_ids=input_ids, attention_mask=attention_mask)
    outputs = torch.sigmoid(outputs).cpu().detach().numpy()
    out = outputs[0][0]
    hate_prediction = float(out)
    if hate_prediction >= 0.5:
        return f"{class_names[1]}"
    else:
        return f"{class_names[0]}"


### SINGLE TWEET CLASSIFICATION ###
expander_bar_2.subheader('Single tweet classification')

# Get sentence input, preprocess it, and convert to flair.data.Sentence format
tw = expander_bar_2.text_input('Tweet:')

if tw != '':
    # Predict tweet
    sentence = sentence_prediction(tw, model)
    # Show prediction
    #with st.spinner('Predicting...'):
    #    sentence
    if sentence == "Hate Speech":
        zero_model = 'typeform/mobilebert-uncased-mnli'
        classifier = pipeline("zero-shot-classification", model=zero_model, tokenizer=config.TOKENIZER)
        text = tw
        candidate_labels = ['Violent', 'Offensive', 'Profane']
        result = classifier(text, candidate_labels)
        data = pd.DataFrame({'Hate Sub-clusters': result['labels'], 'Confidence Level': result['scores']})
        clus = data[data['Confidence Level'] == data['Confidence Level'].max()]
        clus_p = clus['Hate Sub-clusters'].values
        clus_pp = clus_p[0]
        clus_c = clus['Confidence Level'].values
        clus_cc = round(clus_c[0], 2)
        #print('hate sub-cluster: ', clus_pp, ' with a Confidence Level of ', clus_cc)
        #f"{'hate sub-cluster': clus_pp,'Confidence Level': clus_cc}"
        with st.spinner('Predicting...'):
            speech = f"**{sentence}**"
            subclust = f"**Hate sub-cluster: {clus_pp} with a Confidence Level of {clus_cc}**"
            #st.markdown(speech)
            expander_bar_2.write(speech)
            #st.markdown(subclust)
            expander_bar_2.write(subclust)
    else:
        with st.spinner('Predicting...'):
            speech = f"**{sentence}**"
            #st.markdown(speech)
            expander_bar_2.write(speech)

    #st.write(alt.Chart(data).mark_bar().encode(
    #    x='Confidence Level',
    #    y=alt.X('Hate Sub-clusters', sort=None),
    #    color='Hate Sub-clusters'
    #).configure_axis(
    #    grid=False
    #).properties(
    #    width=500,
    #    height=150
    #)
    #)
    # st.write(out)

### TWEET SEARCH AND CLASSIFY ###
expander_bar_2.subheader('Offline Batch tweet classification')

# Initialize empty dataframe
tweet_data = pd.DataFrame({
    'tweet': [],
    'predicted-sentiment': [],
    'location': [],
    'tweet_date': []
})

uploaded_file = expander_bar_2.file_uploader("Choose a file")
if uploaded_file is not None:
    df = pd.read_csv(uploaded_file)
    expander_bar_2.write(df)
    # classify tweet
    #for tweet in df['tweet']:
    for index, row in df.iterrows():
        # Skip iteration if tweet is empty
        #if tweet in ('', ' '):
        if row['tweet'] in ('', ' '):
            continue
        # Make predictions
        class_names = ['Hate Speech', 'Normal Speech']
        sentence = sentence_prediction(row['tweet'], model)  # classifier.predict(sentence)
        sentiment = sentence
        max_len = 140
        if sentiment == "Hate Speech":
            #tokenizer = AutoTokenizer.from_pretrained('typeform/mobilebert-uncased-mnli')
            zero_model = 'typeform/mobilebert-uncased-mnli'
            classifier = pipeline("zero-shot-classification", model=zero_model, tokenizer=config.TOKENIZER)
            text = row['tweet']
            candidate_labels = ['Violent', 'Offensive', 'Profane']
            result = classifier(text, candidate_labels)
            data = pd.DataFrame({'Hate Sub-clusters': result['labels'], 'Confidence Level': result['scores']})
            clus = data[data['Confidence Level'] == data['Confidence Level'].max()]
            clus_p = clus['Hate Sub-clusters'].values
            clus_pp = clus_p[0]
            clus_c = clus['Confidence Level'].values
            clus_cc = round(clus_c[0], 2)
            #tweet_data = tweet_data.append({'tweet': tweet, 'predicted-sentiment': sentiment,
            #                                'hate sub-cluster': clus_pp, 'confidence level': clus_cc},
            #                               ignore_index=True)
            tweet_data = tweet_data.append(
                {'tweet': row['tweet'], 'predicted-sentiment': sentiment, 'hate sub-cluster': clus_pp,
                 'confidence level': clus_cc, 'location': row['location'], 'tweet_date': row['tweet_date']},
                ignore_index=True)
            #tweet_data = tweet_data.reindex(
            #    columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level'])
            tweet_data = tweet_data.reindex(
                columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level',
                         'location', 'tweet_date'])
        else:
            non = ''
            #tweet_data = tweet_data.append(
            #    {'tweet': tweet, 'predicted-sentiment': sentiment, 'hate sub-cluster': non,
            #     'confidence level': non}, ignore_index=True)
            tweet_data = tweet_data.append(
                {'tweet': row['tweet'], 'predicted-sentiment': sentiment, 'hate sub-cluster': non,
                 'confidence level': non, 'location': row['location'], 'tweet_date': row['tweet_date']},
                ignore_index=True)
            tweet_data = tweet_data.reindex(
                columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level',
                         'location', 'tweet_date'])
            #columns=['tweet', 'predicted-sentiment', 'hate sub-cluster', 'confidence level'])

# As long as the query is valid (not empty or equal to '#')...
#if query != '' and query != '#':
#    with st.spinner(f'Searching for and analyzing {query}...'):

# Show query data and sentiment if available
try:
    #expander_bar_2.write(tweet_data)
    tweet_data.to_csv("predicted_tweet_data")
    tweet_data['tweet_date'] = pd.to_datetime(tweet_data['tweet_date'])
    tweet_data_filtered = tweet_data[
        (tweet_data['location'] == selected_region) & (tweet_data['tweet_date'] >= start_date) &
        (tweet_data['tweet_date'] <= end_date)]
    expander_bar_2.write(tweet_data_filtered)
except NameError:
    # if no queries have been made yet
    pass

#---------------------------------#
# Overview of extracted tweets
tweet_data['tweet_date'] = pd.to_datetime(tweet_data['tweet_date'])
tweet_data_filtered = tweet_data[(tweet_data['location'] == selected_region) &
                                 (tweet_data['tweet_date'] >= start_date) &
                                 (tweet_data['tweet_date'] <= end_date)]

expander_bar_3 = st.beta_expander("Visual overview of loaded tweets")
sentiment_count = tweet_data_filtered['predicted-sentiment'].value_counts()
sentiment_count = pd.DataFrame({'Sentiments': sentiment_count.index, 'Tweets': sentiment_count.values})
# region_count = df['location'].value_counts()
# region_count = pd.DataFrame({'Region': region_count.index, 'Tweets': region_count.values})
if len(sentiment_count) == 0:
    expander_bar_3.markdown('There are no visuals at the moment... Please load data to show some visuals')
else:
    fig_1 = px.bar(sentiment_count, x='Sentiments', y='Tweets', color='Tweets', height=500)
    expander_bar_3.plotly_chart(fig_1)
    fig_2 = px.pie(sentiment_count, values='Tweets', names='Sentiments')
    expander_bar_3.plotly_chart(fig_2)
    # fig_3 = px.bar(region_count, x='Region', y='Tweets', color='Tweets', height=500)
    # expander_bar_3.plotly_chart(fig_3)
    #expander_bar_3.table()

#---------------------------------#
# Hate speech tweets
expander_bar_3 = st.beta_expander("View hatespeech tweets")
df_hatespeech = tweet_data_filtered[tweet_data_filtered['predicted-sentiment'] == 'Hate Speech']
if len(df_hatespeech) == 0:
    expander_bar_3.markdown('Nothing to show here since hate speech has not been detected in the set of uploaded tweets')
else:
    expander_bar_3.dataframe(df_hatespeech[['tweet', 'predicted-sentiment']])

#---------------------------------#
# Non-hatespeech tweets
expander_bar_4 = st.beta_expander("View normal text tweets")
df_normalspeech = tweet_data_filtered[tweet_data_filtered['predicted-sentiment'] == 'Normal Speech']
if len(df_normalspeech) == 0:
    expander_bar_4.markdown('Nothing to show here since normal speech has not been detected in the set of uploaded tweets')
else:
    expander_bar_4.dataframe(df_normalspeech[['tweet', 'predicted-sentiment']])

#---------------------------------#
# Hate speech words
st.set_option('deprecation.showPyplotGlobalUse', False)
expander_bar_5 = st.beta_expander("Hate speech key words")
if len(df_hatespeech) == 0:
    expander_bar_5.markdown('Nothing to show here since hate speech has not been detected in the set of uploaded tweets')
else:
    words = " ".join(df_hatespeech["tweet"])
    processed_words = " ".join([word for word in words.split()
                                if "http" not in word and not word.startswith("@") and word != "RT"])
    wordcloud = WordCloud(stopwords=STOPWORDS, background_color="white",
                          width=800, height=640).generate(processed_words)
    plt.imshow(wordcloud)
    plt.xticks([])
    plt.yticks([])
    expander_bar_5.pyplot()
#---------------------------------#
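
# --- Hedged sketch (my addition, not part of the app above) ------------------
# The zero-shot pipeline above is rebuilt for every hate-flagged tweet; it can
# be constructed once and reused. The model name and labels come from the app;
# the sample text is a placeholder.
from transformers import pipeline

zero_shot = pipeline("zero-shot-classification", model="typeform/mobilebert-uncased-mnli")

candidate_labels = ["Violent", "Offensive", "Profane"]
result = zero_shot("sample tweet text goes here", candidate_labels)

# result["labels"] is already sorted by descending score, so the top
# sub-cluster (what the app extracts via a DataFrame max) is simply:
top_label, top_score = result["labels"][0], round(result["scores"][0], 2)
print(top_label, top_score)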
from webull import webull, streamconn

# cli.py
import click
import pickle
import os

HEADER = '\033[95m'
BLUE = '\033[94m'
CYAN = '\033[96m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
RED = '\u001b[31m'
YELLOW = '\u001b[33m'
MAGENTA = '\u001b[35m'

SESSION_FILE = 'session.pickle'
wb = webull()
cur_stream_ticker = ''


def bordered(text):
    lines = text.splitlines()
    width = max(len(s) for s in lines)
    res = ['┌' + '─' * width + '┐']
    for s in lines:
        res.append('│' + (s + ' ' * width)[:width] + '│')
    res.append('└' + '─' * width + '┘')
    return '\n'.join(res)


def on_price_message(topic, data):
    # print(data)
    # The following fields will vary by the topic number you receive (topic 105 in this case).
    global cur_stream_ticker
    print(f"Ticker: {cur_stream_ticker.upper()}({topic["tickerId"]}), Price: {data["deal"]["price"]}", end="\r", flush=True)
    # All your algo processing code goes here.


def on_order_message(topic, data):
    print('Order message: ' + data)


@click.group()
def main():
    if os.path.exists('session.pickle'):
        try:
            sesh = pickle.load(open(SESSION_FILE, 'rb'))
            print('===== {}Logged In ✓{}\n===== Current User: {}{}{}\n===== Refresh Token: {}\n===== Session Expiration Date: {}\n'.format(GREEN, ENDC, BLUE, sesh['settings']['userId'], ENDC, sesh['refreshToken'], sesh['tokenExpireTime']))
            if 'accessToken' in sesh:
                wb._access_token = sesh['accessToken']
                wb._refresh_token = sesh['refreshToken']
                wb._token_expire = sesh['tokenExpireTime']
                wb._uuid = sesh['uuid']
                wb._account_id = sesh['account_id']
        except Exception as exception:
            print('{}Session file was unable to load, resetting session{}'.format(FAIL, ENDC))
            filename = os.path.join('', SESSION_FILE)
            if os.path.exists(filename):
                os.remove(filename)
    else:
        print('===== {}User is not logged in{}\n===== Please run {}sendmfa{} before {}login{} to get mfa code\n'.format(FAIL, ENDC, BLUE, ENDC, BLUE, ENDC))


@main.command()
def status():
    # Placeholder: not implemented yet.
    pass


@main.command()
@click.argument('email')
def getmfa(email):
    print('Sending MFA Code to {}{}{}'.format(CYAN, email, ENDC))
    wb.get_mfa(email)  # A mobile number should be okay as well.
    print('MFA code sent')


@main.command()
@click.argument('email')
@click.argument('password')
@click.argument('mfa_code')
def login(email, password, mfa_code):
    print('Logging into webull')
    login_response = wb.login(email, password, mfa=mfa_code)
    wb._access_token = login_response['accessToken']
    wb._refresh_token = login_response['refreshToken']
    wb._token_expire = login_response['tokenExpireTime']
    wb._uuid = login_response['uuid']
    login_response['account_id'] = wb.get_account_id()
    filename = os.path.join('', SESSION_FILE)
    if os.path.exists(filename):
        print('{}Overwriting existing session with new login{}'.format(WARNING, ENDC))
    pickle.dump(login_response, open(filename, 'wb'))
    print('{}Login Successful{}'.format(GREEN, ENDC))
    print('===== {}Logged In ✓{}\n===== Current User: {}{}{}\n===== Refresh Token: {}\n===== Session Expiration Date: {}\n'.format(BOLD, GREEN, ENDC, BLUE, login_response['settings']['userId'], ENDC, login_response['refreshToken'], login_response['tokenExpireTime']))


@main.command()
def logout():
    if wb._access_token == '':
        print('User already logged out')
        return
    wb.logout()
    filename = os.path.join('', SESSION_FILE)
    if os.path.exists(filename):
        os.remove(filename)
    print('User successfully logged out')
    print('===== {}User is not logged in{}\n===== Please run {}sendmfa{} before {}login{} to get mfa code\n'.format(FAIL, ENDC, BLUE, ENDC, BLUE, ENDC))


@main.command()
@click.argument('ticker')
def get_stock_price(ticker):
    try:
        stock_quote = wb.get_quote(stock=ticker)
        print('Price for {}{}{}: {}{}{}, bid: {}, ask: {}\n'.format(YELLOW, ticker.upper(), ENDC, GREEN, sum([float(stock_quote['askList'][0]['price']), float(stock_quote['bidList'][0]['price'])]) / 2, ENDC, stock_quote['bidList'][0]['price'], stock_quote['askList'][0]['price']))
    except Exception as e:
        print(e)
        print('{}Ticker name {}{}{} was not found{}\n'.format(FAIL, YELLOW, ticker.upper(), RED, ENDC))


@main.command()
@click.argument('ticker')
def buy(ticker):
    # quantity must be 95% of daytrading margin
    try:
        wb.get_trade_token('123456')
        wb.place_order(stock=ticker, price=0, quant=0)
    except Exception as e:
        print('Error while placing order')
        print(e)


@main.command()
def watch_pos():
    try:
        positions = wb.get_positions()
        if not (len(positions) > 0):
            print('No positions were found\n')
            return
        for i, position in enumerate(positions):
            print('{}.'.format(i + 1), end='')
            print('\tTicker: {}\n\tPosition: {} shares\n'.format(position['ticker']['symbol'], position['position']))
        option_pick = input('Select which position to watch: ')
        print('Watching', positions[int(option_pick) - 1]['ticker']['symbol'])
        print('Type following commands for {}'.format(positions[int(option_pick) - 1]['ticker']['symbol']))
        print('(1) Set stop loss at breakeven, (2) Set stop loss slightly above breakeven (3) Set own stop loss')
    except Exception as e:
        print('Something went wrong:', e)

# Position response
# [{'id': 398413331259465728, 'brokerId': 8, 'ticker': {'tickerId': 913324077, 'symbol': 'MU', 'name': 'Micron Tech', 'tinyName': 'Micron Tech', 'listStatus': 1, 'exchangeCode': 'NAS', 'exchangeId': 10, 'type': 2, 'regionId': 6, 'currencyId': 247, 'currencyCode': 'USD', 'secType': [61], 'disExchangeCode': 'NASDAQ', 'disSymbol': 'MU'}, 'position': '850', 'assetType': 'stock', 'cost': '63877.50', 'costPrice': '75.150', 'currency': 'USD', 'lastPrice': '75.08', 'marketValue': '63818.00', 'unrealizedProfitLoss': '-59.50', 'unrealizedProfitLossRate': '-0.0009', 'positionProportion': '1.0000', 'exchangeRate': '1', 'lock': False, 'updatePositionTimeStamp': 1609435085000}]

# Open market response
# {'tickerId': 913254235, 'exchangeId': 10, 'type': 2, 'secType': [61], 'regionId': 6, 'regionCode': 'US', 'currencyId': 247, 'name': 'AMD', 'symbol': 'AMD', 'disSymbol': 'AMD', 'disExchangeCode': 'NASDAQ', 'exchangeCode': 'NAS', 'listStatus': 1, 'template': 'stock', 'derivativeSupport': 1, 'tradeTime': '2020-12-31T15:03:31.110+0000', 'status': 'T', 'close': '91.45', 'change': '-0.85', 'changeRatio': '-0.0092', 'marketValue': '109981965736.91', 'volume': '5151536', 'turnoverRate': '0.0043', 'timeZone': 'America/New_York', 'tzName': 'EST', 'preClose': '92.29', 'open': '92.12', 'high': '92.30', 'low': '90.87', 'vibrateRatio': '0.0155', 'avgVol10D': '30466435', 'avgVol3M': '46084705', 'negMarketValue': '109300183914.11', 'pe': '277.95', 'forwardPe': '74.26', 'indicatedPe': '485.12', 'peTtm': '284.88', 'eps': '0.3290', 'epsTtm': '0.3210', 'pb': '28.42', 'totalShares': '1202711638', 'outstandingShares': '1195255989', 'fiftyTwoWkHigh': '97.98', 'fiftyTwoWkLow': '18.90', 'yield': '0.0000', 'baSize': 1, 'ntvSize': 0, 'askList': [{'price': '91.45', 'volume': '100'}], 'bidList': [{'price': '91.43', 'volume': '300'}], 'currencyCode': 'USD', 'lotSize': '1', 'latestDividendDate': '2000-08-22', 'latestSplitDate': '2000-08-22', 'latestEarningsDate': '2020-10-27', 'ps': '12.76', 'bps': '3.217', 'estimateEarningsDate': '01/26-02/01', 'tradeStatus': 'T'}


@main.command()
@click.argument('ticker')
def stream_stock_price(ticker):
    global cur_stream_ticker
    try:
        cur_stream_ticker = ticker
        conn = streamconn.StreamConn(debug_flg=False)
        conn.price_func = on_price_message
        conn.order_func = on_order_message
        if wb._access_token is not None and len(wb._access_token) > 1:
            conn.connect(wb._did, access_token=wb._access_token)
        else:
            conn.connect(wb._did)
        conn.subscribe(tId=wb.get_ticker(ticker))
        conn.run_loop_once()
        conn.run_blocking_loop()
    except:  # bare except also catches KeyboardInterrupt, so Ctrl-C closes the stream cleanly
        cur_stream_ticker = ''
        print('Closing stream')


if __name__ == "__main__":
    main()
from webull import webull, streamconn

# cli.py
import click
import pickle
import os

HEADER = '\033[95m'
BLUE = '\033[94m'
CYAN = '\033[96m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
RED = '\u001b[31m'
YELLOW = '\u001b[33m'
MAGENTA = '\u001b[35m'

SESSION_FILE = 'session.pickle'
wb = webull()
cur_stream_ticker = ''


def bordered(text):
    lines = text.splitlines()
    width = max(len(s) for s in lines)
    res = ['┌' + '─' * width + '┐']
    for s in lines:
        res.append('│' + (s + ' ' * width)[:width] + '│')
    res.append('└' + '─' * width + '┘')
    return '\n'.join(res)


def on_price_message(topic, data):
    # print(data)
    # The following fields will vary by the topic number you receive (topic 105 in this case).
    global cur_stream_ticker
    print(f"Ticker: {cur_stream_ticker.upper()}({topic['tickerId']}), Price: {data['deal']['price']}", end="\r", flush=True)
    # All your algo processing code goes here.


def on_order_message(topic, data):
    print('Order message: ' + data)


@click.group()
def main():
    if os.path.exists('session.pickle'):
        try:
            sesh = pickle.load(open(SESSION_FILE, 'rb'))
            print('===== {}Logged In ✓{}\n===== Current User: {}{}{}\n===== Refresh Token: {}\n===== Session Expiration Date: {}\n'.format(GREEN, ENDC, BLUE, sesh['settings']['userId'], ENDC, sesh['refreshToken'], sesh['tokenExpireTime']))
            if 'accessToken' in sesh:
                wb._access_token = sesh['accessToken']
                wb._refresh_token = sesh['refreshToken']
                wb._token_expire = sesh['tokenExpireTime']
                wb._uuid = sesh['uuid']
                wb._account_id = sesh['account_id']
        except Exception as exception:
            print('{}Session file was unable to load, resetting session{}'.format(FAIL, ENDC))
            filename = os.path.join('', SESSION_FILE)
            if os.path.exists(filename):
                os.remove(filename)
    else:
        print('===== {}User is not logged in{}\n===== Please run {}sendmfa{} before {}login{} to get mfa code\n'.format(FAIL, ENDC, BLUE, ENDC, BLUE, ENDC))


@main.command()
def status():
    # Placeholder: not implemented yet.
    pass


@main.command()
@click.argument('email')
def getmfa(email):
    print('Sending MFA Code to {}{}{}'.format(CYAN, email, ENDC))
    wb.get_mfa(email)  # A mobile number should be okay as well.
    print('MFA code sent')


@main.command()
@click.argument('email')
@click.argument('password')
@click.argument('mfa_code')
def login(email, password, mfa_code):
    print('Logging into webull')
    login_response = wb.login(email, password, mfa=mfa_code)
    wb._access_token = login_response['accessToken']
    wb._refresh_token = login_response['refreshToken']
    wb._token_expire = login_response['tokenExpireTime']
    wb._uuid = login_response['uuid']
    login_response['account_id'] = wb.get_account_id()
    filename = os.path.join('', SESSION_FILE)
    if os.path.exists(filename):
        print('{}Overwriting existing session with new login{}'.format(WARNING, ENDC))
    pickle.dump(login_response, open(filename, 'wb'))
    print('{}Login Successful{}'.format(GREEN, ENDC))
    print('===== {}Logged In ✓{}\n===== Current User: {}{}{}\n===== Refresh Token: {}\n===== Session Expiration Date: {}\n'.format(BOLD, GREEN, ENDC, BLUE, login_response['settings']['userId'], ENDC, login_response['refreshToken'], login_response['tokenExpireTime']))


@main.command()
def logout():
    if wb._access_token == '':
        print('User already logged out')
        return
    wb.logout()
    filename = os.path.join('', SESSION_FILE)
    if os.path.exists(filename):
        os.remove(filename)
    print('User successfully logged out')
    print('===== {}User is not logged in{}\n===== Please run {}sendmfa{} before {}login{} to get mfa code\n'.format(FAIL, ENDC, BLUE, ENDC, BLUE, ENDC))


@main.command()
@click.argument('ticker')
def get_stock_price(ticker):
    try:
        stock_quote = wb.get_quote(stock=ticker)
        print('Price for {}{}{}: {}{}{}, bid: {}, ask: {}\n'.format(YELLOW, ticker.upper(), ENDC, GREEN, sum([float(stock_quote['askList'][0]['price']), float(stock_quote['bidList'][0]['price'])]) / 2, ENDC, stock_quote['bidList'][0]['price'], stock_quote['askList'][0]['price']))
    except Exception as e:
        print(e)
        print('{}Ticker name {}{}{} was not found{}\n'.format(FAIL, YELLOW, ticker.upper(), RED, ENDC))


@main.command()
@click.argument('ticker')
def buy(ticker):
    # quantity must be 95% of daytrading margin
    try:
        wb.get_trade_token('123456')
        wb.place_order(stock=ticker, price=0, quant=0)
    except Exception as e:
        print('Error while placing order')
        print(e)


@main.command()
def watch_pos():
    try:
        positions = wb.get_positions()
        if not (len(positions) > 0):
            print('No positions were found\n')
            return
        for i, position in enumerate(positions):
            print('{}.'.format(i + 1), end='')
            print('\tTicker: {}\n\tPosition: {} shares\n'.format(position['ticker']['symbol'], position['position']))
        option_pick = input('Select which position to watch: ')
        print('Watching', positions[int(option_pick) - 1]['ticker']['symbol'])
        print('Type following commands for {}'.format(positions[int(option_pick) - 1]['ticker']['symbol']))
        print('(1) Set stop loss at breakeven, (2) Set stop loss slightly above breakeven (3) Set own stop loss')
    except Exception as e:
        print('Something went wrong:', e)

# Position response
# [{'id': 398413331259465728, 'brokerId': 8, 'ticker': {'tickerId': 913324077, 'symbol': 'MU', 'name': 'Micron Tech', 'tinyName': 'Micron Tech', 'listStatus': 1, 'exchangeCode': 'NAS', 'exchangeId': 10, 'type': 2, 'regionId': 6, 'currencyId': 247, 'currencyCode': 'USD', 'secType': [61], 'disExchangeCode': 'NASDAQ', 'disSymbol': 'MU'}, 'position': '850', 'assetType': 'stock', 'cost': '63877.50', 'costPrice': '75.150', 'currency': 'USD', 'lastPrice': '75.08', 'marketValue': '63818.00', 'unrealizedProfitLoss': '-59.50', 'unrealizedProfitLossRate': '-0.0009', 'positionProportion': '1.0000', 'exchangeRate': '1', 'lock': False, 'updatePositionTimeStamp': 1609435085000}]

# Open market response
# {'tickerId': 913254235, 'exchangeId': 10, 'type': 2, 'secType': [61], 'regionId': 6, 'regionCode': 'US', 'currencyId': 247, 'name': 'AMD', 'symbol': 'AMD', 'disSymbol': 'AMD', 'disExchangeCode': 'NASDAQ', 'exchangeCode': 'NAS', 'listStatus': 1, 'template': 'stock', 'derivativeSupport': 1, 'tradeTime': '2020-12-31T15:03:31.110+0000', 'status': 'T', 'close': '91.45', 'change': '-0.85', 'changeRatio': '-0.0092', 'marketValue': '109981965736.91', 'volume': '5151536', 'turnoverRate': '0.0043', 'timeZone': 'America/New_York', 'tzName': 'EST', 'preClose': '92.29', 'open': '92.12', 'high': '92.30', 'low': '90.87', 'vibrateRatio': '0.0155', 'avgVol10D': '30466435', 'avgVol3M': '46084705', 'negMarketValue': '109300183914.11', 'pe': '277.95', 'forwardPe': '74.26', 'indicatedPe': '485.12', 'peTtm': '284.88', 'eps': '0.3290', 'epsTtm': '0.3210', 'pb': '28.42', 'totalShares': '1202711638', 'outstandingShares': '1195255989', 'fiftyTwoWkHigh': '97.98', 'fiftyTwoWkLow': '18.90', 'yield': '0.0000', 'baSize': 1, 'ntvSize': 0, 'askList': [{'price': '91.45', 'volume': '100'}], 'bidList': [{'price': '91.43', 'volume': '300'}], 'currencyCode': 'USD', 'lotSize': '1', 'latestDividendDate': '2000-08-22', 'latestSplitDate': '2000-08-22', 'latestEarningsDate': '2020-10-27', 'ps': '12.76', 'bps': '3.217', 'estimateEarningsDate': '01/26-02/01', 'tradeStatus': 'T'}


@main.command()
@click.argument('ticker')
def stream_stock_price(ticker):
    global cur_stream_ticker
    try:
        cur_stream_ticker = ticker
        conn = streamconn.StreamConn(debug_flg=False)
        conn.price_func = on_price_message
        conn.order_func = on_order_message
        if wb._access_token is not None and len(wb._access_token) > 1:
            conn.connect(wb._did, access_token=wb._access_token)
        else:
            conn.connect(wb._did)
        conn.subscribe(tId=wb.get_ticker(ticker))
        conn.run_loop_once()
        conn.run_blocking_loop()
    except:  # bare except also catches KeyboardInterrupt, so Ctrl-C closes the stream cleanly
        cur_stream_ticker = ''
        print('Closing stream')


if __name__ == "__main__":
    main()
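
# --- Hedged sketch (my addition) ---------------------------------------------
# The bordered() helper defined near the top is never called anywhere in this
# CLI; here is what it produces, with made-up quote data:
print(bordered("AAPL\nbid: 91.43\nask: 91.45"))
# ┌──────────┐
# │AAPL      │
# │bid: 91.43│
# │ask: 91.45│
# └──────────┘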
import json
import re
import subprocess
import urllib.error
import urllib.request
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import List, Optional, Tuple
from urllib.parse import urljoin, urlparse

from .error import NurError
from .manifest import LockedVersion, Repo, RepoType


def fetch_commit_from_feed(url: str) -> str:
    req = urllib.request.Request(url, headers={"User-Agent": "nur-updater"})
    res = urllib.request.urlopen(req)
    try:
        xml = res.read()
        root = ET.fromstring(xml)
        ns = "{http://www.w3.org/2005/Atom}"
        xpath = f"./{ns}entry/{ns}link"
        commit_link = root.find(xpath)
        if commit_link is None:
            raise NurError(f"No commits found in repository feed {url}")
        return Path(urlparse(commit_link.attrib["href"]).path).parts[-1]
    except urllib.error.HTTPError as e:
        if e.code == 404:
            raise NurError(f"Repository feed {url} not found")
        raise


def nix_prefetch_zip(url: str) -> Tuple[str, Path]:
    data = subprocess.check_output(
        ["nix-prefetch-url", "--name", "source", "--unpack", "--print-path", url]
    )
    sha256, path = data.decode().strip().split("\n")
    return sha256, Path(path)


class GithubRepo:
    def __init__(self, owner: str, name: str, branch: str) -> None:
        self.owner = owner
        self.name = name
        self.branch = branch

    def url(self, path: str) -> str:
        return urljoin(f"https://github.com/{self.owner}/{self.name}/", path)

    def latest_commit(self) -> str:
        return fetch_commit_from_feed(self.url(f"commits/{self.branch}.atom"))

    def prefetch(self, ref: str) -> Tuple[str, Path]:
        return nix_prefetch_zip(self.url(f"archive/{ref}.tar.gz"))


class GitlabRepo:
    def __init__(self, domain: str, path: List[str], branch: str) -> None:
        self.domain = domain
        self.path = path
        self.branch = branch

    def latest_commit(self) -> str:
        path = "/".join(self.path)
        url = f"https://{self.domain}/{path}/commits/{self.branch}?format=atom"
        return fetch_commit_from_feed(url)

    def prefetch(self, ref: str) -> Tuple[str, Path]:
        escaped_path = "%2F".join(self.path)
        url = f"https://{self.domain}/api/v4/projects/{escaped_path}/repository/archive.tar.gz?sha={ref}"
        return nix_prefetch_zip(url)


def prefetch_git(repo: Repo) -> Tuple[LockedVersion, Path]:
    cmd = ["nix-prefetch-git"]
    if repo.submodules:
        cmd += ["--fetch-submodules"]
    cmd += ["--rev", f"refs/heads/{repo.branch}"]
    cmd += [repo.url.geturl()]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        stdout, stderr = proc.communicate(timeout=30)
    except subprocess.TimeoutExpired:
        proc.kill()
        raise NurError(f"Timeout expired while prefetching git repository {repo.url.geturl()}")
    if proc.returncode != 0:
        raise NurError(f"Failed to prefetch git repository {repo.url.geturl()}: {stderr.decode("utf-8")}")
    metadata = json.loads(stdout)
    lines = stderr.decode("utf-8").split("\n")
    repo_path = re.search("path is (.+)", lines[-5])
    assert repo_path is not None
    path = Path(repo_path.group(1))
    rev = metadata["rev"]
    sha256 = metadata["sha256"]
    return LockedVersion(repo.url, rev, sha256, repo.submodules), path


def prefetch_github(repo: Repo) -> Tuple[LockedVersion, Optional[Path]]:
    github_path = Path(repo.url.path)
    gh_repo = GithubRepo(github_path.parts[1], github_path.parts[2], repo.branch)
    commit = gh_repo.latest_commit()
    locked_version = repo.locked_version
    if locked_version is not None:
        if locked_version.rev == commit:
            return locked_version, None
    sha256, path = gh_repo.prefetch(commit)
    return LockedVersion(repo.url, commit, sha256), path


def prefetch_gitlab(repo: Repo) -> Tuple[LockedVersion, Optional[Path]]:
    gitlab_path = Path(repo.url.path)
    hostname = repo.url.hostname
    assert hostname is not None, f"Expect a hostname for Gitlab repo: {repo.name}"
    gl_repo = GitlabRepo(hostname, list(gitlab_path.parts[1:]), repo.branch)
    commit = gl_repo.latest_commit()
    locked_version = repo.locked_version
    if locked_version is not None:
        if locked_version.rev == commit:
            return locked_version, None
    sha256, path = gl_repo.prefetch(commit)
    return LockedVersion(repo.url, commit, sha256), path


def prefetch(repo: Repo) -> Tuple[Repo, LockedVersion, Optional[Path]]:
    if repo.type == RepoType.GITHUB:
        locked_version, path = prefetch_github(repo)
    elif repo.type == RepoType.GITLAB:
        locked_version, path = prefetch_gitlab(repo)
    else:
        locked_version, path = prefetch_git(repo)
    return repo, locked_version, path
import json
import re
import subprocess
import urllib.error
import urllib.request
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import List, Optional, Tuple
from urllib.parse import urljoin, urlparse

from .error import NurError
from .manifest import LockedVersion, Repo, RepoType


def fetch_commit_from_feed(url: str) -> str:
    req = urllib.request.Request(url, headers={"User-Agent": "nur-updater"})
    res = urllib.request.urlopen(req)
    try:
        xml = res.read()
        root = ET.fromstring(xml)
        ns = "{http://www.w3.org/2005/Atom}"
        xpath = f"./{ns}entry/{ns}link"
        commit_link = root.find(xpath)
        if commit_link is None:
            raise NurError(f"No commits found in repository feed {url}")
        return Path(urlparse(commit_link.attrib["href"]).path).parts[-1]
    except urllib.error.HTTPError as e:
        if e.code == 404:
            raise NurError(f"Repository feed {url} not found")
        raise


def nix_prefetch_zip(url: str) -> Tuple[str, Path]:
    data = subprocess.check_output(
        ["nix-prefetch-url", "--name", "source", "--unpack", "--print-path", url]
    )
    sha256, path = data.decode().strip().split("\n")
    return sha256, Path(path)


class GithubRepo:
    def __init__(self, owner: str, name: str, branch: str) -> None:
        self.owner = owner
        self.name = name
        self.branch = branch

    def url(self, path: str) -> str:
        return urljoin(f"https://github.com/{self.owner}/{self.name}/", path)

    def latest_commit(self) -> str:
        return fetch_commit_from_feed(self.url(f"commits/{self.branch}.atom"))

    def prefetch(self, ref: str) -> Tuple[str, Path]:
        return nix_prefetch_zip(self.url(f"archive/{ref}.tar.gz"))


class GitlabRepo:
    def __init__(self, domain: str, path: List[str], branch: str) -> None:
        self.domain = domain
        self.path = path
        self.branch = branch

    def latest_commit(self) -> str:
        path = "/".join(self.path)
        url = f"https://{self.domain}/{path}/commits/{self.branch}?format=atom"
        return fetch_commit_from_feed(url)

    def prefetch(self, ref: str) -> Tuple[str, Path]:
        escaped_path = "%2F".join(self.path)
        url = f"https://{self.domain}/api/v4/projects/{escaped_path}/repository/archive.tar.gz?sha={ref}"
        return nix_prefetch_zip(url)


def prefetch_git(repo: Repo) -> Tuple[LockedVersion, Path]:
    cmd = ["nix-prefetch-git"]
    if repo.submodules:
        cmd += ["--fetch-submodules"]
    cmd += ["--rev", f"refs/heads/{repo.branch}"]
    cmd += [repo.url.geturl()]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        stdout, stderr = proc.communicate(timeout=30)
    except subprocess.TimeoutExpired:
        proc.kill()
        raise NurError(f"Timeout expired while prefetching git repository {repo.url.geturl()}")
    if proc.returncode != 0:
        raise NurError(f"Failed to prefetch git repository {repo.url.geturl()}: {stderr.decode('utf-8')}")
    metadata = json.loads(stdout)
    lines = stderr.decode("utf-8").split("\n")
    repo_path = re.search("path is (.+)", lines[-5])
    assert repo_path is not None
    path = Path(repo_path.group(1))
    rev = metadata["rev"]
    sha256 = metadata["sha256"]
    return LockedVersion(repo.url, rev, sha256, repo.submodules), path


def prefetch_github(repo: Repo) -> Tuple[LockedVersion, Optional[Path]]:
    github_path = Path(repo.url.path)
    gh_repo = GithubRepo(github_path.parts[1], github_path.parts[2], repo.branch)
    commit = gh_repo.latest_commit()
    locked_version = repo.locked_version
    if locked_version is not None:
        if locked_version.rev == commit:
            return locked_version, None
    sha256, path = gh_repo.prefetch(commit)
    return LockedVersion(repo.url, commit, sha256), path


def prefetch_gitlab(repo: Repo) -> Tuple[LockedVersion, Optional[Path]]:
    gitlab_path = Path(repo.url.path)
    hostname = repo.url.hostname
    assert hostname is not None, f"Expect a hostname for Gitlab repo: {repo.name}"
    gl_repo = GitlabRepo(hostname, list(gitlab_path.parts[1:]), repo.branch)
    commit = gl_repo.latest_commit()
    locked_version = repo.locked_version
    if locked_version is not None:
        if locked_version.rev == commit:
            return locked_version, None
    sha256, path = gl_repo.prefetch(commit)
    return LockedVersion(repo.url, commit, sha256), path


def prefetch(repo: Repo) -> Tuple[Repo, LockedVersion, Optional[Path]]:
    if repo.type == RepoType.GITHUB:
        locked_version, path = prefetch_github(repo)
    elif repo.type == RepoType.GITLAB:
        locked_version, path = prefetch_gitlab(repo)
    else:
        locked_version, path = prefetch_git(repo)
    return repo, locked_version, path
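
# --- Hedged sketch (my addition, not part of the module above) ----------------
# Using the helpers directly: resolve the newest commit on a branch via the
# Atom feed, then prefetch that revision into the Nix store. The repository
# coordinates are illustrative, and nix-prefetch-url must be on PATH.
gh = GithubRepo("Mic92", "nur", "master")
rev = gh.latest_commit()                 # a 40-character commit hash
sha256, store_path = gh.prefetch(rev)    # shells out to nix-prefetch-url
print(rev, sha256, store_path)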
import base64 import json import os import pathlib import sys import time from test.apps.openapi.schema import OpenAPIVersion from test.utils import HERE, SIMPLE_PATH from urllib.parse import urljoin import hypothesis import pytest import requests import trustme import yaml from _pytest.main import ExitCode from hypothesis import HealthCheck, Phase, Verbosity from schemathesis import Case, DataGenerationMethod, fixups, service from schemathesis.checks import ALL_CHECKS from schemathesis.cli import LoaderConfig, execute, get_exit_code, reset_checks from schemathesis.constants import DEFAULT_RESPONSE_TIMEOUT, USER_AGENT, CodeSampleStyle from schemathesis.hooks import unregister_all from schemathesis.models import APIOperation from schemathesis.runner import DEFAULT_CHECKS, from_schema from schemathesis.runner.impl import threadpool from schemathesis.stateful import Stateful from schemathesis.targets import DEFAULT_TARGETS PHASES = ", ".join(map(lambda x: x.name, Phase)) HEALTH_CHECKS = "|".join(map(lambda x: x.name, HealthCheck)) def test_commands_help(cli): result = cli.main() assert result.exit_code == ExitCode.OK, result.stdout lines = result.stdout.split("\n") assert lines[11] == " auth Authenticate Schemathesis.io." assert lines[12] == " replay Replay requests from a saved cassette." assert lines[13] == " run Perform schemathesis test." result_help = cli.main("--help") result_h = cli.main("-h") assert result.stdout == result_h.stdout == result_help.stdout def test_run_subprocess(testdir): # To verify that CLI entry point is installed properly result = testdir.run("schemathesis") assert result.ret == ExitCode.OK def test_commands_version(cli): result = cli.main("--version") assert result.exit_code == ExitCode.OK, result.stdout assert "version" in result.stdout.split("\n")[0] @pytest.mark.parametrize( "args, error", ( (("run",), "Error: Missing argument 'SCHEMA'."), (("run", "not-url"), "Error: Invalid SCHEMA, must be a valid URL or file path."), (("run", SIMPLE_PATH), 'Error: Missing argument, "--base-url" is required for SCHEMA specified by file.'), (("run", SIMPLE_PATH, "--base-url=test"), "Error: Invalid base URL"), (("run", SIMPLE_PATH, "--base-url=127.0.0.1:8080"), "Error: Invalid base URL"), ( ("run", "http://127.0.0.1", "--request-timeout=-5"), "Error: Invalid value for '--request-timeout': -5 is not in the range x>=1.", ), ( ("run", "http://127.0.0.1", "--request-timeout=0"), "Error: Invalid value for '--request-timeout': 0 is not in the range x>=1.", ), ( ("run", "http://127.0.0.1", "--method=+"), "Error: Invalid value for '--method' / '-M': Invalid regex: nothing to repeat at position 0", ), ( ("run", "http://127.0.0.1", "--auth=123"), "Error: Invalid value for '--auth' / '-a': Should be in KEY:VALUE format. Got: 123", ), ( ("run", "http://127.0.0.1", "--auth=:pass"), "Error: Invalid value for '--auth' / '-a': Username should not be empty", ), ( ("run", "http://127.0.0.1", "--auth=тест:pass"), "Error: Invalid value for '--auth' / '-a': Username should be latin-1 encodable", ), ( ("run", "http://127.0.0.1", "--auth=user:тест"), "Error: Invalid value for '--auth' / '-a': Password should be latin-1 encodable", ), ( ("run", "http://127.0.0.1", "--auth-type=random"), "Error: Invalid value for '--auth-type' / '-A': 'random' is not one of 'basic', 'digest'.", ), ( ("run", "http://127.0.0.1", "--header=123"), "Error: Invalid value for '--header' / '-H': Should be in KEY:VALUE format. 
Got: 123", ), ( ("run", "http://127.0.0.1", "--header=:"), "Error: Invalid value for '--header' / '-H': Header name should not be empty", ), ( ("run", "http://127.0.0.1", "--header= :"), "Error: Invalid value for '--header' / '-H': Header name should not be empty", ), ( ("run", "http://127.0.0.1", "--hypothesis-phases=explicit,first,second"), "Error: Invalid value for '--hypothesis-phases': invalid choice(s): first, second. " f"Choose from {PHASES}", ), ( ("run", "http://127.0.0.1", "--hypothesis-deadline=wrong"), "Error: Invalid value for '--hypothesis-deadline': wrong is not a valid integer or None", ), ( ("run", "http://127.0.0.1", "--hypothesis-deadline=0"), "Error: Invalid value for '--hypothesis-deadline': 0 is not in the range 1<=x<=86399999913600000.", ), ( ("run", "http://127.0.0.1", "--header=тест:test"), "Error: Invalid value for '--header' / '-H': Header name should be latin-1 encodable", ), ( ("run", "http://127.0.0.1", "--header=test:тест"), "Error: Invalid value for '--header' / '-H': Header value should be latin-1 encodable", ), (("run", "//test"), "Error: Invalid SCHEMA, must be a valid URL or file path."), ( ("run", "http://127.0.0.1", "--max-response-time=0"), "Error: Invalid value for '--max-response-time': 0 is not in the range x>=1.", ), ), ) def test_commands_run_errors(cli, args, error): # When invalid arguments are passed to CLI result = cli.main(*args) # Then an appropriate error should be displayed assert result.exit_code == ExitCode.INTERRUPTED, result.stdout assert result.stdout.strip().split("\n")[-1] == error def test_certificate_only_key(cli, tmp_path): # When cert key is passed without cert itself result = cli.main("run", "http://127.0.0.1", f"--request-cert-key={tmp_path}") # Then an appropriate error should be displayed assert result.exit_code == ExitCode.INTERRUPTED, result.stdout assert ( result.stdout.strip().split("\n")[-1] == 'Error: Missing argument, "--request-cert" should be specified as well.' ) @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.operations("success") def test_certificates(cli, schema_url, mocker): request = mocker.spy(requests.Session, "request") # When a cert is passed via CLI args ca = trustme.CA() cert = ca.issue_cert("test.org") with cert.private_key_pem.tempfile() as cert_path: result = cli.run(schema_url, f"--request-cert={cert_path}") assert result.exit_code == ExitCode.OK, result.stdout # Then both schema & test network calls should use this cert assert len(request.call_args_list) == 2 assert request.call_args_list[0][1]["cert"] == request.call_args_list[1][1]["cert"] == str(cert_path) def test_commands_run_help(cli): result_help = cli.main("run", "--help") assert result_help.exit_code == ExitCode.OK, result_help.stdout assert result_help.stdout.strip().split("\n") == [ "Usage: schemathesis run [OPTIONS] SCHEMA [API_SLUG]", "", " Perform schemathesis test against an API specified by SCHEMA.", "", " SCHEMA must be a valid URL or file path pointing to an Open API / GraphQL", " specification.", "", " API_SLUG is an API identifier to upload data to Schemathesis.io.", "", "Filtering options:", "", " These options define what parts of the API will be tested.", "", " -E, --endpoint TEXT Filter schemathesis tests by API operation path", " pattern. 
Example: users/\\d+", " -M, --method TEXT Filter schemathesis tests by HTTP method.", " -T, --tag TEXT Filter schemathesis tests by schema tag pattern.", " -O, --operation-id TEXT Filter schemathesis tests by operationId", " pattern.", " --skip-deprecated-operations Skip testing of deprecated API operations.", " [default: False]", "", "Validation options:", "", " Options, responsible for how responses & schemas will be checked.", "", " -c, --checks [not_a_server_error|status_code_conformance|" "content_type_conformance|response_headers_conformance|response_schema_conformance|all]", " List of checks to run. [default:", " not_a_server_error]", " --max-response-time INTEGER RANGE", " A custom check that will fail if the response", " time is greater than the specified one in", " milliseconds. [x>=1]", " --validate-schema BOOLEAN Enable or disable validation of input schema.", " [default: False]", "", "Hypothesis options:", "", " Configuration of the underlying Hypothesis engine.", "", " --hypothesis-deadline INTEGER RANGE", " Duration in milliseconds that each individual", " example with a test is not allowed to exceed.", " [1<=x<=86399999913600000]", " --hypothesis-derandomize Use Hypothesis's deterministic mode.", " --hypothesis-max-examples INTEGER RANGE", " Maximum number of generated examples per each", " method/path combination. [x>=1]", f" --hypothesis-phases [{PHASES.replace(', ', '|')}]", " Control which phases should be run.", " --hypothesis-report-multiple-bugs BOOLEAN", " Raise only the exception with the smallest", " minimal example.", " --hypothesis-seed INTEGER Set a seed to use for all Hypothesis tests.", f" --hypothesis-suppress-health-check [{HEALTH_CHECKS}]", " Comma-separated list of health checks to", " disable.", " --hypothesis-verbosity [quiet|normal|verbose|debug]", " Verbosity level of Hypothesis messages.", "", "Generic options:", " -D, --data-generation-method [positive|negative]", " Defines how Schemathesis generates data for", " tests. [default:", " DataGenerationMethod.positive]", " -t, --target [response_time|all]", " Targets for input generation.", " -x, --exitfirst Exit instantly on first error or failed test.", " [default: False]", " --dry-run Disable sending data to the application and", " checking responses. Helpful to verify whether", " data is generated at all.", " -a, --auth TEXT Server user and password. Example:", " USER:PASSWORD", " -A, --auth-type [basic|digest] The authentication mechanism to be used.", " Defaults to 'basic'. [default: basic]", " -H, --header TEXT Custom header that will be used in all", " requests to the server. Example:", r" Authorization: Bearer\ 123", " -w, --workers [auto|1-64] Number of workers to run tests. [default: 1]", " -b, --base-url TEXT Base URL address of the API, required for", " SCHEMA if specified by file.", " --app TEXT WSGI/ASGI application to test.", " --request-timeout INTEGER RANGE", " Timeout in milliseconds for network requests", " during the test run. [x>=1]", " --request-tls-verify TEXT Controls whether Schemathesis verifies the", " server's TLS certificate. You can also pass", " the path to a CA_BUNDLE file for private", " certs. [default: true]", " --request-cert PATH File path of unencrypted client certificate", " for authentication. The certificate can be", " bundled with a private key (e.g.
PEM) or the", " private key can be provided with the", " --request-cert-key argument.", " --request-cert-key PATH File path of the private key of the client", " certificate.", " --junit-xml FILENAME Create junit-xml style report file at given", " path.", " --debug-output-file FILENAME Save debug output as JSON lines in the given", " file.", " --show-errors-tracebacks Show full tracebacks for internal errors.", " [default: False]", " --code-sample-style [python|curl]", " Controls the style of code samples for failure", " reproduction.", " --store-network-log FILENAME Store requests and responses into a file.", " --fixups [fast_api|all] Install specified compatibility fixups.", " --stateful [none|links] Utilize stateful testing capabilities.", " --stateful-recursion-limit INTEGER RANGE", " Limit recursion depth for stateful testing.", " [default: 5; 1<=x<=100]", " --force-schema-version [20|30] Force Schemathesis to parse the input schema", " with the specified spec version.", " --no-color Disable ANSI color escape codes.", " --schemathesis-io-token TEXT Schemathesis.io authentication token.", " --schemathesis-io-url TEXT Schemathesis.io base URL.", " --hosts-file FILE Path to a file to store the Schemathesis.io", " auth configuration.", " -v, --verbosity Reduce verbosity of error output.", " -h, --help Show this message and exit.", ] SCHEMA_URI = "https://example.schemathesis.io/openapi.json" @pytest.mark.parametrize( "args, expected", ( ([], {}), (["--exitfirst"], {"exit_first": True}), (["--workers=2"], {"workers_num": 2}), (["--hypothesis-seed=123"], {"seed": 123}), ( [ "--hypothesis-deadline=1000", "--hypothesis-derandomize", "--hypothesis-max-examples=1000", "--hypothesis-phases=explicit,generate", "--hypothesis-report-multiple-bugs=0", "--hypothesis-suppress-health-check=too_slow,filter_too_much", "--hypothesis-verbosity=normal", ], { "hypothesis_settings": hypothesis.settings( deadline=1000, derandomize=True, max_examples=1000, phases=[Phase.explicit, Phase.generate], report_multiple_bugs=False, suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much], verbosity=Verbosity.normal, ) }, ), (["--hypothesis-deadline=None"], {"hypothesis_settings": hypothesis.settings(deadline=None)}), (["--max-response-time=10"], {"max_response_time": 10}), ), ) def test_from_schema_arguments(cli, mocker, swagger_20, args, expected): mocker.patch("schemathesis.cli.load_schema", return_value=swagger_20) execute = mocker.patch("schemathesis.runner.from_schema", autospec=True) result = cli.run(SCHEMA_URI, *args) expected = { "checks": DEFAULT_CHECKS, "targets": DEFAULT_TARGETS, "workers_num": 1, "exit_first": False, "dry_run": False, "stateful": Stateful.links, "stateful_recursion_limit": 5, "auth": None, "auth_type": "basic", "headers": {}, "request_timeout": DEFAULT_RESPONSE_TIMEOUT, "request_tls_verify": True, "request_cert": None, "store_interactions": False, "seed": None, "max_response_time": None, **expected, } hypothesis_settings = expected.pop("hypothesis_settings", None) call_kwargs = execute.call_args[1] executed_hypothesis_settings = call_kwargs.pop("hypothesis_settings", None) if hypothesis_settings is not None: # Compare non-default Hypothesis settings as `hypothesis.settings` can't be compared assert executed_hypothesis_settings.show_changed() == hypothesis_settings.show_changed() assert call_kwargs == expected @pytest.mark.parametrize( "args, expected", ( (["--auth=test:test"], {"auth": ("test", "test"), "auth_type": "basic"}), (["--auth=test:test", "--auth-type=digest"], 
{"auth": ("test", "test"), "auth_type": "digest"}), (["--auth=test:test", "--auth-type=DIGEST"], {"auth": ("test", "test"), "auth_type": "digest"}), (["--header=Authorization:Bearer 123"], {"headers": {"Authorization": "Bearer 123"}}), (["--header=Authorization: Bearer 123 "], {"headers": {"Authorization": "Bearer 123 "}}), (["--method=POST", "--method", "GET"], {"method": ("POST", "GET")}), (["--method=POST", "--auth=test:test"], {"auth": ("test", "test"), "auth_type": "basic", "method": ("POST",)}), (["--endpoint=users"], {"endpoint": ("users",)}), (["--tag=foo"], {"tag": ("foo",)}), (["--operation-id=getUser"], {"operation_id": ("getUser",)}), (["--base-url=https://example.com/api/v1test"], {"base_url": "https://example.com/api/v1test"}), ), ) def test_load_schema_arguments(cli, mocker, args, expected): mocker.patch("schemathesis.runner.SingleThreadRunner.execute", autospec=True) load_schema = mocker.patch("schemathesis.cli.load_schema", autospec=True) cli.run(SCHEMA_URI, *args) expected = LoaderConfig( SCHEMA_URI, **{ **{ "app": None, "base_url": None, "auth": None, "auth_type": "basic", "endpoint": None, "headers": {}, "data_generation_methods": [DataGenerationMethod.default()], "method": None, "tag": None, "operation_id": None, "validate_schema": False, "skip_deprecated_operations": False, "force_schema_version": None, "request_tls_verify": True, "request_cert": None, }, **expected, }, ) assert load_schema.call_args[0][0] == expected def test_load_schema_arguments_headers_to_loader_for_app(testdir, cli, mocker): from_wsgi = mocker.patch("schemathesis.specs.openapi.loaders.from_wsgi", autospec=True) module = testdir.make_importable_pyfile( location=""" from test.apps.openapi._flask import create_app app = create_app() """ ) cli.run("/schema.yaml", "--app", f"{module.purebasename}:app", "-H", "Authorization: Bearer 123") assert from_wsgi.call_args[1]["headers"]["Authorization"] == "Bearer 123" def test_all_checks(cli, mocker, swagger_20): mocker.patch("schemathesis.cli.load_schema", return_value=swagger_20) execute = mocker.patch("schemathesis.runner.from_schema", autospec=True) result = cli.run(SCHEMA_URI, "--checks=all") assert execute.call_args[1]["checks"] == ALL_CHECKS @pytest.mark.operations() def test_hypothesis_parameters(cli, schema_url): # When Hypothesis options are passed via command line result = cli.run( schema_url, "--hypothesis-deadline=1000", "--hypothesis-derandomize", "--hypothesis-max-examples=1000", "--hypothesis-phases=explicit,generate", "--hypothesis-report-multiple-bugs=0", "--hypothesis-suppress-health-check=too_slow,filter_too_much", "--hypothesis-verbosity=normal", ) # Then they should be correctly converted into arguments accepted by `hypothesis.settings` # Parameters are validated in `hypothesis.settings` assert result.exit_code == ExitCode.OK, result.stdout @pytest.mark.operations("success") @pytest.mark.parametrize("workers", (1, 2)) def test_cli_run_output_success(cli, cli_args, workers): result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.OK, result.stdout lines = result.stdout.split("\n") assert lines[7] == f"Workers: {workers}" if workers == 1: assert lines[10].startswith("GET /api/success .") else: assert lines[10] == "." 
assert " HYPOTHESIS OUTPUT " not in result.stdout assert " SUMMARY " in result.stdout lines = result.stdout.strip().split("\n") last_line = lines[-1] assert "== 1 passed in " in last_line # And the running time is a small positive number time = float(last_line.split(" ")[-2].replace("s", "")) assert 0 <= time < 5 @pytest.mark.parametrize("workers", (1, 2)) def test_cli_run_output_with_errors(cli, cli_args, workers): result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert " HYPOTHESIS OUTPUT " not in result.stdout assert " SUMMARY " in result.stdout lines = result.stdout.strip().split("\n") assert "1. Received a response with 5xx status code: 500" in lines assert "Performed checks:" in lines assert " not_a_server_error 1 / 3 passed FAILED " in lines assert "== 1 passed, 1 failed in " in lines[-1] @pytest.mark.operations("failure") @pytest.mark.parametrize("workers", (1, 2)) def test_cli_run_only_failure(cli, cli_args, app_type, workers): result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert " HYPOTHESIS OUTPUT " not in result.stdout assert " SUMMARY " in result.stdout lines = result.stdout.strip().split("\n") if app_type == "real": assert "Response payload: `500: Internal Server Error`" in lines else: assert "<h1>Internal Server Error</h1>" in lines assert " not_a_server_error 0 / 2 passed FAILED " in lines assert "== 1 failed in " in lines[-1] @pytest.mark.operations("upload_file") def test_cli_binary_body(cli, schema_url, hypothesis_max_examples): result = cli.run( schema_url, "--hypothesis-suppress-health-check=filter_too_much", f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.OK, result.stdout assert " HYPOTHESIS OUTPUT " not in result.stdout @pytest.mark.operations() @pytest.mark.parametrize("workers", (1, 2)) def test_cli_run_output_empty(cli, cli_args, workers): result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.OK, result.stdout assert " HYPOTHESIS OUTPUT " not in result.stdout assert " SUMMARY " in result.stdout lines = result.stdout.strip().split("\n") assert "No checks were performed." 
in lines assert "= Empty test suite =" in lines[-1] @pytest.mark.operations() @pytest.mark.parametrize("workers", (1, 2)) def test_cli_run_changed_base_url(cli, server, cli_args, workers): # When the CLI receives custom base URL base_url = f"http://127.0.0.1:{server["port"]}/api" result = cli.run(*cli_args, "--base-url", base_url, f"--workers={workers}") # Then the base URL should be correctly displayed in the CLI output lines = result.stdout.strip().split("\n") assert lines[5] == f"Base URL: {base_url}" @pytest.mark.parametrize( "url, message", ( ("/doesnt_exist", "Schema was not found at http://127.0.0.1"), ("/failure", "Failed to load schema, code 500 was returned from http://127.0.0.1"), ), ) @pytest.mark.operations("failure") @pytest.mark.parametrize("workers", (1, 2)) def test_execute_missing_schema(cli, openapi3_base_url, url, message, workers): result = cli.run(f"{openapi3_base_url}{url}", f"--workers={workers}") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert message in result.stdout @pytest.mark.operations("success", "slow") @pytest.mark.parametrize("workers", (1, 2)) def test_hypothesis_failed_event(cli, cli_args, workers): # When the Hypothesis deadline option is set manually, and it is smaller than the response time result = cli.run(*cli_args, "--hypothesis-deadline=20", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And the given operation should be displayed as an error lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("GET /api/slow E") else: # It could be in any sequence, because of multiple threads assert lines[10].split("\n")[0] in ("E.", ".E", "EE") # empty line after all tests progress output assert lines[11] == "" # And the proper error message should be displayed assert "DeadlineExceeded: API response time is too slow! " in result.stdout assert "which exceeds the deadline of 20.00ms" in result.stdout # And the CLI should not suggest showing full tracebacks to the user assert "Add this option to your command line parameters to see full tracebacks" not in result.stdout @pytest.mark.operations("success", "slow") @pytest.mark.parametrize("workers", (1, 2)) def test_connection_timeout(cli, server, schema_url, workers): # When connection timeout is specified in the CLI and the request fails because of it result = cli.run(schema_url, "--request-timeout=80", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And the given operation should be displayed as a failure lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("GET /api/slow F") assert lines[11].startswith("GET /api/success .") else: # It could be in any sequence, because of multiple threads assert lines[10].split("\n")[0] in ("F.", ".F") # And the proper error message should be displayed assert "1. 
Response timed out after 80.00ms" in result.stdout @pytest.mark.operations("success", "slow") @pytest.mark.parametrize("workers", (1, 2)) def test_default_hypothesis_settings(cli, cli_args, workers): # When there is a slow operation and if it is faster than 15s result = cli.run(*cli_args, f"--workers={workers}") # Then the tests should pass, because of default 15s deadline assert result.exit_code == ExitCode.OK, result.stdout lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("GET /api/slow .") assert lines[11].startswith("GET /api/success .") else: # It could be in any sequence, because of multiple threads assert lines[10] == ".." @pytest.mark.operations("failure") @pytest.mark.parametrize("workers", (1, 2)) def test_seed(cli, cli_args, workers): # When there is a failure result = cli.run(*cli_args, "--hypothesis-seed=456", f"--workers={workers}") # Then the tests should fail and RNG seed should be displayed assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "Or add this option to your command line parameters: --hypothesis-seed=456" in result.stdout.split("\n") @pytest.mark.operations("unsatisfiable") @pytest.mark.parametrize("workers", (1, 2)) def test_unsatisfiable(cli, cli_args, workers): # When the app's schema contains parameters that can't be generated # For example if it contains contradiction in the parameters definition - requires to be integer AND string at the # same time result = cli.run(*cli_args, f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And standard Hypothesis error should not appear in the output assert "You can add @seed" not in result.stdout # And this operation should be marked as errored in the progress line lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("POST /api/unsatisfiable E") else: assert lines[10] == "E" # And more clear error message is displayed instead of Hypothesis one lines = result.stdout.split("\n") assert "hypothesis.errors.Unsatisfiable: Unable to satisfy schema parameters for this API operation" in lines @pytest.mark.operations("flaky") @pytest.mark.parametrize("workers", (1, 2)) def test_flaky(cli, cli_args, workers): # When the operation fails / succeeds randomly # Derandomize is needed for reproducible test results result = cli.run(*cli_args, "--hypothesis-derandomize", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And standard Hypothesis error should not appear in the output assert "Failed to reproduce exception. 
Expected:" not in result.stdout # And this operation should be marked as errored in the progress line lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("GET /api/flaky E") else: assert lines[10] == "E" # And it should be displayed only once in "ERRORS" section assert "= ERRORS =" in result.stdout assert "_ GET /api/flaky [P] _" in result.stdout # And it should not go into "FAILURES" section assert "= FAILURES =" not in result.stdout # And more clear error message is displayed instead of Hypothesis one lines = result.stdout.split("\n") assert "hypothesis.errors.Flaky: Tests on this API operation produce unreliable results: " in lines assert "Falsified on the first call but did not on a subsequent one" in lines # And example is displayed assert "Query : {'id': 0}" in lines @pytest.mark.operations("invalid") @pytest.mark.parametrize("workers", (1,)) def test_invalid_operation(cli, cli_args, workers): # When the app's schema contains errors # For example if its type is "int" but should be "integer" # And schema validation is disabled result = cli.run(*cli_args, f"--workers={workers}", "--validate-schema=false") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And standard Hypothesis error should not appear in the output assert "You can add @seed" not in result.stdout # And this operation should be marked as errored in the progress line lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("POST /api/invalid E") else: assert lines[10] == "E" assert " POST /api/invalid " in lines[13] # There shouldn't be a section end immediately after section start - there should be some error text # An internal error happened during a test run # Error: AssertionError assert not lines[14].startswith("=") @pytest.mark.operations("invalid") def test_invalid_operation_suggestion(cli, cli_args): # When the app's schema contains errors result = cli.run(*cli_args, "--validate-schema=true") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And there should be a suggestion to disable schema validation expected = """You can disable input schema validation with --validate-schema=false command-line option In this case, Schemathesis cannot guarantee proper behavior during the test run """ assert expected in result.stdout @pytest.mark.operations("invalid") def test_invalid_operation_suggestion_disabled(cli, cli_args): # When the app's schema contains errors # And schema validation is disabled result = cli.run(*cli_args, "--validate-schema=false") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And there should be no suggestion assert "You can disable input schema validation" not in result.stdout @pytest.mark.operations("teapot") @pytest.mark.parametrize("workers", (1, 2)) def test_status_code_conformance(cli, cli_args, workers): # When operation returns a status code, that is not listed in "responses" # And "status_code_conformance" is specified result = cli.run(*cli_args, "-c", "status_code_conformance", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And this operation should be marked as failed in the progress line lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("POST /api/teapot F") else: assert lines[10] == "F" assert "status_code_conformance 0 / 2 passed FAILED" 
in result.stdout lines = result.stdout.split("\n") assert "1. Received a response with a status code, which is not defined in the schema: 418" in lines assert lines[16].strip() == "Declared status codes: 200" @pytest.mark.operations("headers") def test_headers_conformance_invalid(cli, cli_args): result = cli.run(*cli_args, "-c", "response_headers_conformance") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.split("\n") assert "1. Received a response with missing headers: X-Custom-Header" in lines @pytest.mark.operations("headers") def test_headers_conformance_valid(cli, cli_args): result = cli.run(*cli_args, "-c", "response_headers_conformance", "-H", "X-Custom-Header: bla") assert result.exit_code == ExitCode.OK, result.stdout lines = result.stdout.split("\n") assert "1. Received a response with missing headers: X-Custom-Header" not in lines @pytest.mark.operations("multiple_failures") def test_multiple_failures_single_check(cli, schema_url): result = cli.run(schema_url, "--hypothesis-seed=1", "--hypothesis-derandomize") assert "= HYPOTHESIS OUTPUT =" not in result.stdout assert "Hypothesis found 2 distinct failures" not in result.stdout lines = result.stdout.strip().split("\n") assert "1. Received a response with 5xx status code: 500" in lines assert "2. Received a response with 5xx status code: 504" in lines assert "1 failed in " in lines[-1] @pytest.mark.operations("multiple_failures") def test_multiple_failures_different_check(cli, schema_url): result = cli.run( schema_url, "-c", "status_code_conformance", "-c", "not_a_server_error", "--hypothesis-derandomize", "--hypothesis-seed=1", ) assert "= HYPOTHESIS OUTPUT =" not in result.stdout lines = result.stdout.strip().split("\n") assert "1. Received a response with a status code, which is not defined in the schema: 500" in lines assert "2. Received a response with 5xx status code: 500" in lines assert "3. Received a response with a status code, which is not defined in the schema: 504" in lines assert "4. 
Received a response with 5xx status code: 504" in lines assert "1 failed in " in lines[-1] @pytest.mark.parametrize("workers", (1, 2)) def test_connection_error(cli, schema_url, workers): # When the given base_url is unreachable result = cli.run(schema_url, "--base-url=http://127.0.0.1:1/api", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And all collected API operations should be marked as errored lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("GET /api/failure E") assert lines[11].startswith("GET /api/success E") else: assert lines[10] == "EE" # And errors section title should be displayed assert "= ERRORS =" in result.stdout # And all API operations should be mentioned in this section as subsections assert "_ GET /api/success [P] _" in result.stdout assert "_ GET /api/failure [P] _" in result.stdout # And the proper error messages should be displayed for each operation assert "Max retries exceeded with url: /api/success" in result.stdout assert "Max retries exceeded with url: /api/failure" in result.stdout @pytest.mark.parametrize("workers", (1, 2)) def test_schema_not_available(cli, workers): # When the given schema is unreachable result = cli.run("http://127.0.0.1:1/schema.yaml", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And error message is displayed lines = result.stdout.split("\n") assert lines[0] == "Failed to load schema from http://127.0.0.1:1/schema.yaml" assert lines[1].startswith( "Error: requests.exceptions.ConnectionError: HTTPConnectionPool(host='127.0.0.1', port=1): " "Max retries exceeded with url: /schema.yaml" ) def test_schema_not_available_wsgi(cli, loadable_flask_app): # When the given schema is unreachable result = cli.run("unknown.yaml", f"--app={loadable_flask_app}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And error message is displayed lines = result.stdout.split("\n") assert lines[0] == "Schema was not found at unknown.yaml" @pytest.mark.operations("custom_format") def test_pre_run_hook_valid(testdir, cli, schema_url, app): # When `--pre-run` hook is passed to the CLI call module = testdir.make_importable_pyfile( hook=""" import string import schemathesis from hypothesis import strategies as st schemathesis.register_string_format( "digits", st.text( min_size=1, alphabet=st.characters( whitelist_characters=string.digits, whitelist_categories=() ) ) ) """ ) result = cli.main( "--pre-run", module.purebasename, "run", "--hypothesis-suppress-health-check=filter_too_much", schema_url ) # Then CLI should run successfully assert result.exit_code == ExitCode.OK, result.stdout # And all registered new string format should produce digits as expected assert all(request.query["id"].isdigit() for request in app["incoming_requests"]) def test_pre_run_hook_invalid(testdir, cli): # When `--pre-run` hook is passed to the CLI call # And its importing causes an exception module = testdir.make_importable_pyfile(hook="1 / 0") result = cli.main("--pre-run", module.purebasename, "run", "http://127.0.0.1:1") # Then CLI run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And a helpful message should be displayed in the output lines = result.stdout.strip().split("\n") assert lines[0] == "An exception happened during the hook loading:" assert lines[7] == "ZeroDivisionError: 
division by zero" assert lines[9] == "Aborted!" def test_pre_run_hook_module_not_found(testdir, cli): testdir.makepyfile(hook="1 / 0") result = cli.main("--pre-run", "hook", "run", "http://127.0.0.1:1") assert os.getcwd() in sys.path # Then CLI run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "ModuleNotFoundError" not in result.stdout lines = result.stdout.strip().split("\n") assert lines[0] == "An exception happened during the hook loading:" assert lines[7] == "ZeroDivisionError: division by zero" assert lines[9] == "Aborted!" @pytest.mark.usefixtures("reset_hooks") def test_conditional_checks(testdir, cli, hypothesis_max_examples, schema_url): module = testdir.make_importable_pyfile( hook=""" import schemathesis import click @schemathesis.register_check def conditional_check(response, case): # skip this check return True """ ) result = cli.main( "--pre-run", module.purebasename, "run", "-c", "conditional_check", schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.OK # One additional case created for two API operations - /api/failure and /api/success. assert "No checks were performed." in result.stdout @pytest.mark.usefixtures("reset_hooks") def test_add_case(testdir, cli, hypothesis_max_examples, schema_url): module = testdir.make_importable_pyfile( hook=""" import schemathesis import click @schemathesis.hooks.register def add_case(context, case, response): if not case.headers: case.headers = {} case.headers["copy"] = "this is a copied case" return case @schemathesis.register_check def add_case_check(response, case): if case.headers and case.headers.get("copy") == "this is a copied case": # we will look for this output click.echo("The case was added!") """ ) result = cli.main( "--pre-run", module.purebasename, "run", "-c", "add_case_check", schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.OK # One additional case created for two API operations - /api/failure and /api/success. assert result.stdout.count("The case was added!") == 2 @pytest.mark.usefixtures("reset_hooks") def test_add_case_returns_none(testdir, cli, hypothesis_max_examples, schema_url): """Tests that no additional test case created when the add_case hook returns None.""" module = testdir.make_importable_pyfile( hook=""" import schemathesis import click @schemathesis.hooks.register def add_case(context, case, response): return None @schemathesis.register_check def add_case_check(response, case): click.echo("Validating case.") """ ) result = cli.main( "--pre-run", module.purebasename, "run", "-c", "add_case_check", schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.OK # with --hypothesis-max-examples=1 and 2 API operations, only two cases should be created and validated. # If the count is greater than 2, additional test cases should not have been created but were created. 
assert result.stdout.count("Validating case.") == 2 @pytest.mark.usefixtures("reset_hooks") def test_multiple_add_case_hooks(testdir, cli, hypothesis_max_examples, schema_url): """add_case hooks that mutate the case in place should not affect other cases.""" module = testdir.make_importable_pyfile( hook=""" import schemathesis import click @schemathesis.hooks.register("add_case") def add_first_header(context, case, response): if not case.headers: case.headers = {} case.headers["first"] = "first header" return case @schemathesis.hooks.register("add_case") def add_second_header(context, case, response): if not case.headers: case.headers = {} case.headers["second"] = "second header" return case @schemathesis.register_check def add_case_check(response, case): if case.headers and case.headers.get("first") == "first header": # we will look for this output click.echo("First case added!") if case.headers and case.headers.get("second") == "second header": # we will look for this output click.echo("Second case added!") """ ) result = cli.main( "--pre-run", module.purebasename, "run", "-c", "add_case_check", schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.OK # Each header should only be duplicated once for each API operation - /api/failure and /api/success. assert result.stdout.count("First case added!") == 2 assert result.stdout.count("Second case added!") == 2 @pytest.mark.usefixtures("reset_hooks") def test_add_case_output(testdir, cli, hypothesis_max_examples, schema_url): module = testdir.make_importable_pyfile( hook=""" import schemathesis import click @schemathesis.hooks.register("add_case") def add_first_header(context, case, response): if not case.headers: case.headers = {} case.headers["first"] = "first header" return case @schemathesis.hooks.register("add_case") def add_second_header(context, case, response): if not case.headers: case.headers = {} case.headers["second"] = "second header" return case @schemathesis.register_check def add_case_check(response, case): if ( case.headers and ( case.headers.get("second") == "second header" ) ): assert False, "failing cases from second add_case hook" """ ) result = cli.main( "--pre-run", module.purebasename, "run", "-c", "add_case_check", schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.TESTS_FAILED assert result.stdout.count("failing cases from second add_case hook") == 2 add_case_check_line = next( filter(lambda line: line.strip().startswith("add_case_check"), result.stdout.split("\n")) ) assert "8 / 12" in add_case_check_line @pytest.fixture def reset_hooks(): yield unregister_all() reset_checks() @pytest.fixture( params=[ ('AssertionError("Custom check failed!")', "1. Custom check failed!"), ("AssertionError", "1. 
Check 'new_check' failed"), ] ) def new_check(request, testdir, cli): exception, message = request.param module = testdir.make_importable_pyfile( hook=f""" import schemathesis @schemathesis.register_check def new_check(response, result): raise {exception} """ ) yield module, message reset_checks() # To verify that "new_check" is unregistered result = cli.run("--help") lines = result.stdout.splitlines() assert ( " -c, --checks [not_a_server_error|status_code_conformance|content_type_conformance|" "response_headers_conformance|response_schema_conformance|all]" in lines ) @pytest.mark.operations("success") def test_register_check(new_check, cli, schema_url): new_check, message = new_check # When `--pre-run` hook is passed to the CLI call # And it contains registering a new check, which always fails for the testing purposes result = cli.main("--pre-run", new_check.purebasename, "run", "-c", "new_check", schema_url) # Then CLI run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And a message from the new check should be displayed lines = result.stdout.strip().split("\n") assert lines[14] == message def assert_threaded_executor_interruption(lines, expected, optional_interrupt=False): # It is possible to have a case when first call without an error will start processing # But after, another thread will have interruption and will push this event before the # first thread will finish. Race condition: "" is for this case and "." for the other # way around # The app under test was killed ungracefully and since we run it in a child or the main thread # its output might occur in the captured stdout. ignored_exception = "Exception ignored in: " in lines[10] assert lines[10] in expected or ignored_exception, lines if not optional_interrupt: assert "!! KeyboardInterrupt !!" in lines[11], lines assert any("=== SUMMARY ===" in line for line in lines[10:]) @pytest.mark.parametrize("workers", (1, 2)) @pytest.mark.filterwarnings("ignore:Exception in thread") def test_keyboard_interrupt(cli, cli_args, base_url, mocker, flask_app, swagger_20, workers): # When a Schemathesis run in interrupted by keyboard or via SIGINT operation = APIOperation("/success", "GET", {}, swagger_20, base_url=base_url) if len(cli_args) == 2: operation.app = flask_app original = Case(operation).call_wsgi else: original = Case(operation).call counter = 0 def mocked(*args, **kwargs): nonlocal counter counter += 1 if counter > 1: # For threaded case it emulates SIGINT for the worker thread raise KeyboardInterrupt return original(*args, **kwargs) if len(cli_args) == 2: mocker.patch("schemathesis.Case.call_wsgi", wraps=mocked) else: mocker.patch("schemathesis.Case.call", wraps=mocked) result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.OK, result.stdout # Then execution stops, and a message about interruption is displayed lines = result.stdout.strip().split("\n") # And summary is still displayed in the end of the output if workers == 1: assert lines[10].startswith("GET /api/failure .") assert lines[10].endswith("[ 50%]") assert lines[11] == "GET /api/success " assert "!! KeyboardInterrupt !!" 
in lines[12] assert "== SUMMARY ==" in lines[14] else: assert_threaded_executor_interruption(lines, ("", ".")) @pytest.mark.filterwarnings("ignore:Exception in thread") def test_keyboard_interrupt_threaded(cli, cli_args, mocker): # When a Schemathesis run is interrupted by the keyboard or via SIGINT original = time.sleep counter = 0 def mocked(*args, **kwargs): nonlocal counter counter += 1 if counter > 1: raise KeyboardInterrupt return original(*args, **kwargs) mocker.patch("schemathesis.runner.impl.threadpool.time.sleep", autospec=True, wraps=mocked) result = cli.run(*cli_args, "--workers=2", "--hypothesis-derandomize") # the exit status depends on what thread finished first assert result.exit_code in (ExitCode.OK, ExitCode.TESTS_FAILED), result.stdout # Then execution stops, and a message about interruption is displayed lines = result.stdout.strip().split("\n") # There are many scenarios possible, depends on how many tests will be executed before interruption # and in what order. it could be no tests at all, some of them or all of them. assert_threaded_executor_interruption(lines, ("F", ".", "F.", ".F", ""), True) @pytest.mark.operations("failure") @pytest.mark.parametrize("workers", (1, 2)) def test_hypothesis_output_capture(mocker, cli, cli_args, workers): mocker.patch("schemathesis.utils.IGNORED_PATTERNS", ()) result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "= HYPOTHESIS OUTPUT =" in result.stdout assert "Falsifying example" in result.stdout async def test_multiple_files_schema(openapi_2_app, testdir, cli, hypothesis_max_examples, openapi2_base_url): # When the schema contains references to other files uri = pathlib.Path(HERE).as_uri() + "/" schema = { "swagger": "2.0", "info": {"title": "Example API", "description": "An API to test Schemathesis", "version": "1.0.0"}, "host": "127.0.0.1:8888", "basePath": "/api", "schemes": ["http"], "produces": ["application/json"], "paths": { "/teapot": { "post": { "parameters": [ { # during the CLI run we have a different working directory, # so specifying an absolute uri "schema": {"$ref": urljoin(uri, "data/petstore_v2.yaml#/definitions/Pet")}, "in": "body", "name": "user", "required": True, } ], "responses": {"200": {"description": "OK"}}, } } }, } openapi_2_app["config"].update({"should_fail": True, "schema_data": schema}) schema_file = testdir.makefile(".yaml", schema=yaml.dump(schema)) # And file path is given to the CLI result = cli.run( str(schema_file), f"--base-url={openapi2_base_url}", f"--hypothesis-max-examples={hypothesis_max_examples or 5}", "--hypothesis-derandomize", ) # Then Schemathesis should resolve it and run successfully assert result.exit_code == ExitCode.OK, result.stdout # And all relevant requests should contain proper data for resolved references payload = await openapi_2_app["incoming_requests"][0].json() assert isinstance(payload["name"], str) assert isinstance(payload["photoUrls"], list) def test_wsgi_app(testdir, cli): module = testdir.make_importable_pyfile( location=""" from test.apps.openapi._flask import create_app app = create_app() """ ) result = cli.run("/schema.yaml", "--app", f"{module.purebasename}:app") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "1 passed, 1 failed in" in result.stdout def test_wsgi_app_exception(testdir, cli): module = testdir.make_importable_pyfile( location=""" from test.apps.openapi._flask import create_app 1 / 0 """ ) result = cli.run("/schema.yaml", "--app", 
f"{module.purebasename}:app", "--show-errors-tracebacks") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "Traceback (most recent call last):" in result.stdout assert "ZeroDivisionError: division by zero" in result.stdout def test_wsgi_app_missing(testdir, cli): module = testdir.make_importable_pyfile( location=""" from test.apps.openapi._flask import create_app """ ) result = cli.run("/schema.yaml", "--app", f"{module.purebasename}:app") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.strip().split("\n") assert "AttributeError: module 'location' has no attribute 'app'" in lines assert "Can not import application from the given module!" in lines def test_wsgi_app_internal_exception(testdir, cli): module = testdir.make_importable_pyfile( location=""" from test.apps.openapi._flask import create_app app = create_app() app.config["internal_exception"] = True """ ) result = cli.run("/schema.yaml", "--app", f"{module.purebasename}:app", "--hypothesis-derandomize") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.strip().split("\n") assert "== APPLICATION LOGS ==" in lines[48], result.stdout.strip() assert "ERROR in app: Exception on /api/success [GET]" in lines[50] assert lines[66] == "ZeroDivisionError: division by zero" @pytest.mark.parametrize("args", ((), ("--base-url",))) def test_aiohttp_app(openapi_version, request, cli, loadable_aiohttp_app, args): # When a URL is passed together with app if args: args += (request.getfixturevalue("base_url"),) result = cli.run("/schema.yaml", "--app", loadable_aiohttp_app, *args) # Then the schema should be loaded from that URL assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "1 passed, 1 failed in" in result.stdout def test_wsgi_app_remote_schema(cli, schema_url, loadable_flask_app): # When a URL is passed together with app result = cli.run(schema_url, "--app", loadable_flask_app) # Then the schema should be loaded from that URL assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "1 passed, 1 failed in" in result.stdout def test_wsgi_app_path_schema(cli, loadable_flask_app): # When an existing path to schema is passed together with app result = cli.run(SIMPLE_PATH, "--app", loadable_flask_app) # Then the schema should be loaded from that path assert result.exit_code == ExitCode.OK, result.stdout assert "1 passed in" in result.stdout def test_multipart_upload(testdir, tmp_path, hypothesis_max_examples, openapi3_base_url, cli): cassette_path = tmp_path / "output.yaml" # When requestBody has a binary field or an array of binary items responses = {"200": {"description": "OK", "content": {"application/json": {"schema": {"type": "object"}}}}} schema = { "openapi": "3.0.0", "info": {"title": "Sample API", "description": "API description in Markdown.", "version": "1.0.0"}, "paths": { "/property": { "post": { "requestBody": { "required": True, "content": { "multipart/form-data": { "schema": { "type": "object", "properties": {"file": {"type": "string", "format": "binary"}}, "required": ["file"], } } }, }, "responses": responses, } }, "/array": { "post": { "requestBody": { "required": True, "content": { "multipart/form-data": { "schema": { "type": "object", "properties": { "files": {"type": "array", "items": {"type": "string", "format": "binary"}} }, "required": ["files"], } } }, }, "responses": responses, } }, }, "servers": [{"url": "https://api.example.com/{basePath}", "variables": {"basePath": {"default": "v1"}}}], } 
schema_file = testdir.makefile(".yaml", schema=yaml.dump(schema)) result = cli.run( str(schema_file), f"--base-url={openapi3_base_url}", f"--hypothesis-max-examples={hypothesis_max_examples or 5}", "--show-errors-tracebacks", "--hypothesis-derandomize", f"--store-network-log={cassette_path}", ) # Then it should be correctly sent to the server assert result.exit_code == ExitCode.OK, result.stdout assert "= ERRORS =" not in result.stdout with cassette_path.open() as fd: cassette = yaml.safe_load(fd) def decode(idx): request = cassette["http_interactions"][idx]["request"] if "body" not in request: return None return base64.b64decode(request["body"]["base64_string"]) first_decoded = decode(0) if first_decoded: assert b'Content-Disposition: form-data; name="files"; filename="files"\r\n' in first_decoded last_decoded = decode(-1) if last_decoded: assert b'Content-Disposition: form-data; name="file"; filename="file"\r\n' in last_decoded # NOTE, that the actual API operation is not checked in this test @pytest.mark.operations("form") def test_urlencoded_form(cli, cli_args): # When the API operation accepts application/x-www-form-urlencoded result = cli.run(*cli_args) # Then Schemathesis should generate appropriate payload assert result.exit_code == ExitCode.OK, result.stdout @pytest.mark.parametrize("workers", (1, 2)) @pytest.mark.operations("success") def test_targeted(mocker, cli, cli_args, workers): target = mocker.spy(hypothesis, "target") result = cli.run(*cli_args, f"--workers={workers}", "--target=response_time") assert result.exit_code == ExitCode.OK, result.stdout target.assert_called_with(mocker.ANY, label="response_time") def test_chained_internal_exception(testdir, cli, hypothesis_max_examples, openapi3_base_url): # When schema contains an error that causes an internal error in `jsonschema` raw_schema = { "openapi": "3.0.2", "info": {"title": "Test", "description": "Test", "version": "0.1.0"}, "paths": { "/users": { "get": { "responses": { # Response code should be a string 200: {"description": "OK", "content": {"application/json": {"schema": {"type": "object"}}}} } } } }, } schema_file = testdir.makefile(".yaml", schema=yaml.dump(raw_schema)) result = cli.run( str(schema_file), f"--base-url={openapi3_base_url}", f"--hypothesis-max-examples={hypothesis_max_examples or 1}", "--show-errors-tracebacks", "--validate-schema=true", ) assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.splitlines() assert "The above exception was the direct cause of the following exception:" in lines @pytest.mark.parametrize( "options, expected", ( ( ("--skip-deprecated-operations",), "Collected API operations: 1", ), ( (), "Collected API operations: 2", ), ), ) def test_skip_deprecated_operations(testdir, cli, openapi3_base_url, options, expected): # When there are some deprecated API operations definition = { "responses": {"200": {"description": "OK", "content": {"application/json": {"schema": {"type": "object"}}}}} } raw_schema = { "openapi": "3.0.2", "info": {"title": "Test", "description": "Test", "version": "0.1.0"}, "paths": { "/users": { "get": definition, "post": { "deprecated": True, **definition, }, } }, } schema_file = testdir.makefile(".yaml", schema=yaml.dump(raw_schema)) result = cli.run(str(schema_file), f"--base-url={openapi3_base_url}", "--hypothesis-max-examples=1", *options) assert result.exit_code == ExitCode.OK, result.stdout # Then only not deprecated API operations should be selected assert expected in result.stdout.splitlines() @pytest.fixture() def 
fast_api_fixup(): yield fixups.uninstall() @pytest.mark.parametrize("fixup", ("all", "fast_api")) def test_fast_api_fixup(testdir, cli, base_url, fast_api_schema, hypothesis_max_examples, fast_api_fixup, fixup): # When schema contains Draft 7 definitions as ones from FastAPI may contain schema_file = testdir.makefile(".yaml", schema=yaml.dump(fast_api_schema)) result = cli.run( str(schema_file), f"--base-url={base_url}", f"--hypothesis-max-examples={hypothesis_max_examples or 1}", f"--fixups={fixup}", ) assert result.exit_code == ExitCode.OK, result.stdout @pytest.mark.operations("success") def test_colon_in_headers(cli, schema_url, app): header = "X-FOO" value = "bar:spam" result = cli.run(schema_url, f"--header={header}:{value}") assert result.exit_code == ExitCode.OK assert app["incoming_requests"][0].headers[header] == value @pytest.mark.operations("create_user", "get_user", "update_user") def test_openapi_links(cli, cli_args, schema_url, hypothesis_max_examples): # When the schema contains Open API links or Swagger 2 extension for links # And these links are nested - API operations in these links contain links to another operations result = cli.run( *cli_args, f"--hypothesis-max-examples={hypothesis_max_examples or 2}", "--hypothesis-seed=1", "--hypothesis-derandomize", "--hypothesis-deadline=None", "--show-errors-tracebacks", ) lines = result.stdout.splitlines() # Note, it might fail if it uncovers the placed bug, which this version of stateful testing should not uncover # It is pretty rare and requires a high number for the `max_examples` setting. This version is staged for removal # Therefore it won't be fixed assert result.exit_code == ExitCode.OK, result.stdout # Then these links should be tested # And lines with the results of these tests should be indented assert lines[11].startswith(" -> GET /api/users/{user_id} .") # And percentage should be adjusted appropriately assert lines[11].endswith("[ 50%]") assert lines[12].startswith(" -> PATCH /api/users/{user_id} .") assert lines[12].endswith("[ 60%]") assert lines[13].startswith(" -> PATCH /api/users/{user_id} .") assert lines[13].endswith("[ 66%]") @pytest.mark.operations("create_user", "get_user", "update_user") def test_openapi_links_disabled(cli, schema_url, hypothesis_max_examples): # When the user disabled Open API links usage result = cli.run( schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 2}", "--hypothesis-seed=1", "--hypothesis-derandomize", "--hypothesis-deadline=None", "--show-errors-tracebacks", "--stateful=none", ) lines = result.stdout.splitlines() assert result.exit_code == ExitCode.OK, result.stdout # Then the links should not be traversed assert lines[10].startswith("POST /api/users/ .") assert lines[11].startswith("GET /api/users/{user_id} .") assert lines[12].startswith("PATCH /api/users/{user_id} .") @pytest.mark.parametrize("recursion_limit, expected", ((1, "....."), (5, "......"))) @pytest.mark.operations("create_user", "get_user", "update_user") def test_openapi_links_multiple_threads(cli, cli_args, schema_url, recursion_limit, hypothesis_max_examples, expected): # When the schema contains Open API links or Swagger 2 extension for links # And these links are nested - API operations in these links contain links to another operations result = cli.run( *cli_args, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", "--hypothesis-seed=1", "--hypothesis-derandomize", "--hypothesis-deadline=None", "--hypothesis-suppress-health-check=too_slow,filter_too_much", 
"--show-errors-tracebacks", f"--stateful-recursion-limit={recursion_limit}", "--workers=2", ) lines = result.stdout.splitlines() assert result.exit_code == ExitCode.OK, result.stdout assert lines[10] == expected + "." if hypothesis_max_examples else expected def test_get_request_with_body(testdir, cli, base_url, hypothesis_max_examples, schema_with_get_payload): schema_file = testdir.makefile(".yaml", schema=yaml.dump(schema_with_get_payload)) result = cli.run( str(schema_file), f"--base-url={base_url}", f"--hypothesis-max-examples={hypothesis_max_examples or 1}", "--show-errors-tracebacks", "--validate-schema=true", ) assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.splitlines() assert "InvalidSchema: Body parameters are defined for GET request." in lines @pytest.mark.operations("slow") @pytest.mark.parametrize("workers", (1, 2)) def test_max_response_time_invalid(cli, server, schema_url, workers): # When maximum response time check is specified in the CLI and the request takes more time result = cli.run(schema_url, "--max-response-time=50", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And the given operation should be displayed as a failure lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("GET /api/slow F") else: assert lines[10].startswith("F") # And the proper error message should be displayed assert "max_response_time 0 / 2 passed FAILED" in result.stdout assert "Response time exceeded the limit of 50 ms" in result.stdout @pytest.mark.operations("slow") def test_max_response_time_valid(cli, server, schema_url): # When maximum response time check is specified in the CLI and the request takes less time result = cli.run(schema_url, "--max-response-time=200") # Then no errors should occur assert result.exit_code == ExitCode.OK, result.stdout @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.parametrize("header", ("Authorization", "authorization")) @pytest.mark.operations() def test_auth_and_authorization_header_are_disallowed(cli, schema_url, header, openapi_version): # When ``--auth`` is passed together with ``--header`` that sets the ``Authorization`` header result = cli.run(schema_url, "--auth=test:test", f"--header={header}:token123") # Then it causes a validation error assert result.exit_code == ExitCode.INTERRUPTED assert ( "Invalid value: Passing `--auth` together with `--header` that sets `Authorization` is not allowed." 
in result.stdout ) @pytest.mark.parametrize("workers_num", (1, 2)) @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.operations("failure", "success") def test_exit_first(cli, schema_url, openapi_version, workers_num, mocker): # When the `--exit-first` CLI option is passed # And a failure occurs stop_worker = mocker.spy(threadpool, "stop_worker") result = cli.run(schema_url, "--exitfirst", "-w", str(workers_num)) # Then tests are failed assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout if workers_num == 1: lines = result.stdout.split("\n") # And the execution should stop on the first failure for idx, line in enumerate(lines): if line.startswith("GET /api/failure F"): assert line.endswith("[ 50%]") break else: pytest.fail("Line is not found") # the "FAILURES" section goes after a new line, rather than continuing to the next operation next_line = lines[idx + 1] assert next_line == "" assert "FAILURES" in lines[idx + 2] else: stop_worker.assert_called() @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) def test_base_url_not_required_for_dry_run(testdir, cli, openapi_version, empty_open_api_3_schema): schema_file = testdir.makefile(".yaml", schema=yaml.dump(empty_open_api_3_schema)) result = cli.run(str(schema_file), "--dry-run") assert result.exit_code == ExitCode.OK, result.stdout def test_long_operation_output(testdir, empty_open_api_3_schema): # See GH-990 # When there is a narrow screen # And the API schema contains an operation with a long name empty_open_api_3_schema["paths"] = { f"/{'a' * 100}": { "get": { "responses": {"200": {"description": "OK"}}, } }, f"/{'a' * 10}": { "get": { "responses": {"200": {"description": "OK"}}, } }, } schema_file = testdir.makefile(".yaml", schema=yaml.dump(empty_open_api_3_schema)) result = testdir.run("schemathesis", "run", str(schema_file), "--dry-run") # Then this operation name should be truncated assert result.ret == ExitCode.OK assert "GET /aaaaaaaaaa . [ 50%]" in result.outlines assert "GET /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[...] . [100%]" in result.outlines def test_reserved_characters_in_operation_name(testdir, empty_open_api_3_schema): # See GH-992 # When an API operation name contains `:` empty_open_api_3_schema["paths"] = { "/foo:bar": { "get": { "responses": {"200": {"description": "OK"}}, } }, } schema_file = testdir.makefile(".yaml", schema=yaml.dump(empty_open_api_3_schema)) result = testdir.run("schemathesis", "run", str(schema_file), "--dry-run") # Then this operation name should be displayed with the leading `/` assert result.ret == ExitCode.OK assert "GET /foo:bar .
[100%]" in result.outlines def test_error_during_example_generation(testdir, cli): # See GH-994 # When the API schema is in YAML # And contains an unquoted value, that is casted to boolean # And it is behind references # And there are examples of another parameter content = """ swagger: "2.0" basePath: / info: description: Test title: Test version: 1.0 parameters: Bar: in: body name: payload required: true schema: properties: name: example: test type: string type: object paths: /test: post: parameters: - $ref: "#/parameters/Bar" - in: query name: test type: string responses: "201": description: Ok definitions: Foo: properties: bar: example: foo type: string # Should be quoted on: example: true type: boolean type: object """ schema_file = testdir.makefile(".yaml", schema=content) result = cli.run(str(schema_file), "--dry-run", "--validate-schema=false") # Then the run should not be interrupted, but the run fails assert result.exit_code == ExitCode.TESTS_FAILED assert " The API schema contains non-string keys" in result.stdout def test_unsupported_regex(testdir, cli, empty_open_api_3_schema): def make_definition(min_items): return { "post": { "requestBody": { "required": True, "content": { "application/json": { "schema": { "type": "array", # Java-style regular expression "items": {"type": "string", "pattern": r"\p{Alpha}"}, "maxItems": 3, "minItems": min_items, } } }, }, "responses": {"200": {"description": "OK"}}, } } # When an operation uses an unsupported regex syntax empty_open_api_3_schema["paths"] = { # Can't generate anything "/foo": make_definition(min_items=1), # Can generate an empty array "/bar": make_definition(min_items=0), } schema_file = testdir.makefile(".yaml", schema=yaml.dump(empty_open_api_3_schema)) result = cli.run(str(schema_file), "--dry-run", "--hypothesis-max-examples=1") # Then if it is possible it should generate at least something assert "POST /bar ." 
in result.stdout # And if it is not then there should be an error with a descriptive error message assert "POST /foo E" in result.stdout lines = result.stdout.splitlines() for idx, line in enumerate(lines): if "__ POST /foo [P] __" in line: break else: pytest.fail("Line not found") assert r"Got pattern='\\p{Alpha}', but this is not valid syntax for a Python regular expression" in lines[idx + 1] @pytest.mark.parametrize("extra", ("--auth='test:wrong'", "-H Authorization: Basic J3Rlc3Q6d3Jvbmcn")) @pytest.mark.operations("basic") def test_auth_override_on_protected_operation(cli, base_url, schema_url, extra): # See GH-792 # When the tested API operation has basic auth # And the auth is overridden (directly or via headers) result = cli.run(schema_url, "--checks=all", extra) # And there is an error during testing assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.splitlines() # Then request representation in the output should have the overridden value assert ( lines[18] == f"Headers : {{"Authorization": "Basic J3Rlc3Q6d3Jvbmcn", "User-Agent": "{USER_AGENT}'," f" 'Accept-Encoding': 'gzip, deflate', 'Accept': '*/*', 'Connection': 'keep-alive'}}" ) # And code sample as well assert ( lines[26] == f" curl -X GET -H 'Accept: */*' -H 'Accept-Encoding: gzip, deflate' -H 'Authorization: Basic J3Rlc3Q6d3Jvbmcn' -H 'Connection: keep-alive' -H 'User-Agent: {USER_AGENT}' {base_url}/basic" ) @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.operations("flaky") def test_explicit_headers_in_output_on_errors(cli, base_url, schema_url, openapi_version): # When there is a non-fatal error during testing (e.g. flakiness) # And custom headers were passed explicitly auth = "Basic J3Rlc3Q6d3Jvbmcn" result = cli.run(schema_url, "--checks=all", f"-H Authorization: {auth}") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.splitlines() # Then request representation in the output should have the overridden value assert lines[17] == f"Headers : {{"Authorization": "{auth}', 'User-Agent': '{USER_AGENT}'}}" # And code sample as well assert lines[22].startswith( f" curl -X GET -H 'Authorization: {auth}' -H 'User-Agent: {USER_AGENT}' '{base_url}/flaky?id=0'" ) @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.operations("__all__") def test_debug_output(tmp_path, cli, schema_url, openapi_version, hypothesis_max_examples): # When the `--debug-output-file` option is passed debug_file = tmp_path / "debug.jsonl" cassette_path = tmp_path / "output.yaml" result = cli.run( schema_url, f"--debug-output-file={debug_file}", "--validate-schema=false", f"--hypothesis-max-examples={hypothesis_max_examples or 1}", f"--store-network-log={cassette_path}", ) assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # Then all underlying runner events should be stored as JSONL file assert debug_file.exists() with debug_file.open(encoding="utf-8") as fd: lines = fd.readlines() for line in lines: json.loads(line) # And statuses are encoded as strings assert list(json.loads(lines[-1])["total"]["not_a_server_error"]) == ["success", "total", "failure"] @pytest.mark.operations("cp866") def test_response_payload_encoding(cli, cli_args): # See GH-1073 # When the "failed" response has non UTF-8 encoding result = cli.run(*cli_args, "--checks=all") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # Then it should be displayed according its actual encoding assert "Response payload: `Тест`" in 
result.stdout.splitlines() @pytest.mark.operations("conformance") def test_response_schema_conformance_deduplication(cli, cli_args): # See GH-907 # When the "response_schema_conformance" check is present # And the app return different error messages caused by the same validator result = cli.run(*cli_args, "--checks=response_schema_conformance") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # Then the errors should be deduplicated assert result.stdout.count("Response payload: ") == 1 @pytest.mark.parametrize("kind", ("env_var", "arg")) @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.operations("success") def test_no_color(monkeypatch, cli, schema_url, kind): args = (schema_url,) if kind == "env_var": monkeypatch.setenv("NO_COLOR", "1") if kind == "arg": args += ("--no-color",) result = cli.run(*args, color=True) assert result.exit_code == ExitCode.OK, result.stdout assert "[1m" not in result.stdout @pytest.mark.parametrize("graphql_path", ("/graphql", "/foo")) def test_graphql_url(cli, graphql_url, graphql_path): # When the target API is GraphQL result = cli.run(graphql_url, "--hypothesis-max-examples=5") assert_graphql(result) def test_graphql_asgi(cli, loadable_graphql_fastapi_app, graphql_path): # When the target API is GraphQL result = cli.run(f"--app={loadable_graphql_fastapi_app}", "--hypothesis-max-examples=5", graphql_path) assert_graphql(result) def assert_graphql(result): assert result.exit_code == ExitCode.OK, result.stdout # Then it should be detected automatically assert "Specification version: GraphQL" in result.stdout assert "getBooks . " in result.stdout assert "getAuthors . " in result.stdout def assert_exit_code(event_stream, code): with pytest.raises(SystemExit) as exc: execute( event_stream, workers_num=1, show_errors_tracebacks=False, validate_schema=False, store_network_log=None, junit_xml=None, verbosity=0, code_sample_style=CodeSampleStyle.default(), debug_output_file=None, schemathesis_io_token=None, schemathesis_io_url=service.DEFAULT_URL, api_slug=None, ) assert exc.value.code == code def test_cli_execute(swagger_20, capsys): event_stream = from_schema(swagger_20).execute() for _ in event_stream: pass assert_exit_code(event_stream, 1) assert capsys.readouterr().out.strip() == "Unexpected error" def test_get_exit_code(swagger_20, capsys): event_stream = from_schema(swagger_20).execute() next(event_stream) event = next(event_stream) assert get_exit_code(event) == 1 @pytest.mark.parametrize("base_url", (None, "http://127.0.0.1/apiv2")) @pytest.mark.parametrize("location", ("path", "query", "header", "cookie")) def test_missing_content_and_schema(cli, base_url, tmp_path, testdir, empty_open_api_3_schema, location): debug_file = tmp_path / "debug.jsonl" # When an Open API 3 parameter is missing `schema` & `content` empty_open_api_3_schema["paths"] = { "/foo": {"get": {"parameters": [{"in": location, "name": "X-Foo", "required": True}]}} } schema_file = testdir.makefile(".json", schema=json.dumps(empty_open_api_3_schema)) args = [ str(schema_file), f"--debug-output-file={debug_file}", "--dry-run", "--validate-schema=false", "--hypothesis-max-examples=1", ] if base_url is not None: args.append(f"--base-url={base_url}") result = cli.run(*args) lines = result.stdout.split("\n") # Then CLI should show that this API operation errored # And show the proper message under its "ERRORS" section if base_url is None: assert lines[10].startswith("GET /foo E") else: assert lines[10].startswith("GET /apiv2/foo E") assert "_ GET 
/apiv2/foo [P] _" in lines[13] assert ( lines[14] == f'InvalidSchema: Can not generate data for {location} parameter "X-Foo"! ' "It should have either `schema` or `content` keywords defined" ) # And emitted Before / After event pairs have the same correlation ids with debug_file.open(encoding="utf-8") as fd: events = [json.loads(line) for line in fd] assert events[1]["correlation_id"] == events[2]["correlation_id"] # And they should have the same "verbose_name" assert events[1]["verbose_name"] == events[2]["verbose_name"]
import base64 import json import os import pathlib import sys import time from test.apps.openapi.schema import OpenAPIVersion from test.utils import HERE, SIMPLE_PATH from urllib.parse import urljoin import hypothesis import pytest import requests import trustme import yaml from _pytest.main import ExitCode from hypothesis import HealthCheck, Phase, Verbosity from schemathesis import Case, DataGenerationMethod, fixups, service from schemathesis.checks import ALL_CHECKS from schemathesis.cli import LoaderConfig, execute, get_exit_code, reset_checks from schemathesis.constants import DEFAULT_RESPONSE_TIMEOUT, USER_AGENT, CodeSampleStyle from schemathesis.hooks import unregister_all from schemathesis.models import APIOperation from schemathesis.runner import DEFAULT_CHECKS, from_schema from schemathesis.runner.impl import threadpool from schemathesis.stateful import Stateful from schemathesis.targets import DEFAULT_TARGETS PHASES = ", ".join(map(lambda x: x.name, Phase)) HEALTH_CHECKS = "|".join(map(lambda x: x.name, HealthCheck)) def test_commands_help(cli): result = cli.main() assert result.exit_code == ExitCode.OK, result.stdout lines = result.stdout.split("\n") assert lines[11] == " auth Authenticate Schemathesis.io." assert lines[12] == " replay Replay requests from a saved cassette." assert lines[13] == " run Perform schemathesis test." result_help = cli.main("--help") result_h = cli.main("-h") assert result.stdout == result_h.stdout == result_help.stdout def test_run_subprocess(testdir): # To verify that CLI entry point is installed properly result = testdir.run("schemathesis") assert result.ret == ExitCode.OK def test_commands_version(cli): result = cli.main("--version") assert result.exit_code == ExitCode.OK, result.stdout assert "version" in result.stdout.split("\n")[0] @pytest.mark.parametrize( "args, error", ( (("run",), "Error: Missing argument 'SCHEMA'."), (("run", "not-url"), "Error: Invalid SCHEMA, must be a valid URL or file path."), (("run", SIMPLE_PATH), 'Error: Missing argument, "--base-url" is required for SCHEMA specified by file.'), (("run", SIMPLE_PATH, "--base-url=test"), "Error: Invalid base URL"), (("run", SIMPLE_PATH, "--base-url=127.0.0.1:8080"), "Error: Invalid base URL"), ( ("run", "http://127.0.0.1", "--request-timeout=-5"), "Error: Invalid value for '--request-timeout': -5 is not in the range x>=1.", ), ( ("run", "http://127.0.0.1", "--request-timeout=0"), "Error: Invalid value for '--request-timeout': 0 is not in the range x>=1.", ), ( ("run", "http://127.0.0.1", "--method=+"), "Error: Invalid value for '--method' / '-M': Invalid regex: nothing to repeat at position 0", ), ( ("run", "http://127.0.0.1", "--auth=123"), "Error: Invalid value for '--auth' / '-a': Should be in KEY:VALUE format. Got: 123", ), ( ("run", "http://127.0.0.1", "--auth=:pass"), "Error: Invalid value for '--auth' / '-a': Username should not be empty", ), ( ("run", "http://127.0.0.1", "--auth=тест:pass"), "Error: Invalid value for '--auth' / '-a': Username should be latin-1 encodable", ), ( ("run", "http://127.0.0.1", "--auth=user:тест"), "Error: Invalid value for '--auth' / '-a': Password should be latin-1 encodable", ), ( ("run", "http://127.0.0.1", "--auth-type=random"), "Error: Invalid value for '--auth-type' / '-A': 'random' is not one of 'basic', 'digest'.", ), ( ("run", "http://127.0.0.1", "--header=123"), "Error: Invalid value for '--header' / '-H': Should be in KEY:VALUE format. 
Got: 123", ), ( ("run", "http://127.0.0.1", "--header=:"), "Error: Invalid value for '--header' / '-H': Header name should not be empty", ), ( ("run", "http://127.0.0.1", "--header= :"), "Error: Invalid value for '--header' / '-H': Header name should not be empty", ), ( ("run", "http://127.0.0.1", "--hypothesis-phases=explicit,first,second"), "Error: Invalid value for '--hypothesis-phases': invalid choice(s): first, second. " f"Choose from {PHASES}", ), ( ("run", "http://127.0.0.1", "--hypothesis-deadline=wrong"), "Error: Invalid value for '--hypothesis-deadline': wrong is not a valid integer or None", ), ( ("run", "http://127.0.0.1", "--hypothesis-deadline=0"), "Error: Invalid value for '--hypothesis-deadline': 0 is not in the range 1<=x<=86399999913600000.", ), ( ("run", "http://127.0.0.1", "--header=тест:test"), "Error: Invalid value for '--header' / '-H': Header name should be latin-1 encodable", ), ( ("run", "http://127.0.0.1", "--header=test:тест"), "Error: Invalid value for '--header' / '-H': Header value should be latin-1 encodable", ), (("run", "//test"), "Error: Invalid SCHEMA, must be a valid URL or file path."), ( ("run", "http://127.0.0.1", "--max-response-time=0"), "Error: Invalid value for '--max-response-time': 0 is not in the range x>=1.", ), ), ) def test_commands_run_errors(cli, args, error): # When invalid arguments are passed to CLI result = cli.main(*args) # Then an appropriate error should be displayed assert result.exit_code == ExitCode.INTERRUPTED, result.stdout assert result.stdout.strip().split("\n")[-1] == error def test_certificate_only_key(cli, tmp_path): # When cert key is passed without cert itself result = cli.main("run", "http://127.0.0.1", f"--request-cert-key={tmp_path}") # Then an appropriate error should be displayed assert result.exit_code == ExitCode.INTERRUPTED, result.stdout assert ( result.stdout.strip().split("\n")[-1] == 'Error: Missing argument, "--request-cert" should be specified as well.' ) @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.operations("success") def test_certificates(cli, schema_url, mocker): request = mocker.spy(requests.Session, "request") # When a cert is passed via CLI args ca = trustme.CA() cert = ca.issue_cert("test.org") with cert.private_key_pem.tempfile() as cert_path: result = cli.run(schema_url, f"--request-cert={cert_path}") assert result.exit_code == ExitCode.OK, result.stdout # Then both schema & test network calls should use this cert assert len(request.call_args_list) == 2 assert request.call_args_list[0][1]["cert"] == request.call_args_list[1][1]["cert"] == str(cert_path) def test_commands_run_help(cli): result_help = cli.main("run", "--help") assert result_help.exit_code == ExitCode.OK, result_help.stdout assert result_help.stdout.strip().split("\n") == [ "Usage: schemathesis run [OPTIONS] SCHEMA [API_SLUG]", "", " Perform schemathesis test against an API specified by SCHEMA.", "", " SCHEMA must be a valid URL or file path pointing to an Open API / GraphQL", " specification.", "", " API_SLUG is an API identifier to upload data to Schemathesis.io.", "", "Filtering options:", "", " These options define what parts of the API will be tested.", "", " -E, --endpoint TEXT Filter schemathesis tests by API operation path", " pattern. 
Example: users/\\d+", " -M, --method TEXT Filter schemathesis tests by HTTP method.", " -T, --tag TEXT Filter schemathesis tests by schema tag pattern.", " -O, --operation-id TEXT Filter schemathesis tests by operationId", " pattern.", " --skip-deprecated-operations Skip testing of deprecated API operations.", " [default: False]", "", "Validation options:", "", " Options, responsible for how responses & schemas will be checked.", "", " -c, --checks [not_a_server_error|status_code_conformance|" "content_type_conformance|response_headers_conformance|response_schema_conformance|all]", " List of checks to run. [default:", " not_a_server_error]", " --max-response-time INTEGER RANGE", " A custom check that will fail if the response", " time is greater than the specified one in", " milliseconds. [x>=1]", " --validate-schema BOOLEAN Enable or disable validation of input schema.", " [default: False]", "", "Hypothesis options:", "", " Configuration of the underlying Hypothesis engine.", "", " --hypothesis-deadline INTEGER RANGE", " Duration in milliseconds that each individual", " example with a test is not allowed to exceed.", " [1<=x<=86399999913600000]", " --hypothesis-derandomize Use Hypothesis's deterministic mode.", " --hypothesis-max-examples INTEGER RANGE", " Maximum number of generated examples per each", " method/path combination. [x>=1]", f" --hypothesis-phases [{PHASES.replace(', ', '|')}]", " Control which phases should be run.", " --hypothesis-report-multiple-bugs BOOLEAN", " Raise only the exception with the smallest", " minimal example.", " --hypothesis-seed INTEGER Set a seed to use for all Hypothesis tests.", f" --hypothesis-suppress-health-check [{HEALTH_CHECKS}]", " Comma-separated list of health checks to", " disable.", " --hypothesis-verbosity [quiet|normal|verbose|debug]", " Verbosity level of Hypothesis messages.", "", "Generic options:", " -D, --data-generation-method [positive|negative]", " Defines how Schemathesis generates data for", " tests. [default:", " DataGenerationMethod.positive]", " -t, --target [response_time|all]", " Targets for input generation.", " -x, --exitfirst Exit instantly on first error or failed test.", " [default: False]", " --dry-run Disable sending data to the application and", " checking responses. Helpful to verify whether", " data is generated at all.", " -a, --auth TEXT Server user and password. Example:", " USER:PASSWORD", " -A, --auth-type [basic|digest] The authentication mechanism to be used.", " Defaults to 'basic'. [default: basic]", " -H, --header TEXT Custom header that will be used in all", " requests to the server. Example:", r" Authorization: Bearer\ 123", " -w, --workers [auto|1-64] Number of workers to run tests. [default: 1]", " -b, --base-url TEXT Base URL address of the API, required for", " SCHEMA if specified by file.", " --app TEXT WSGI/ASGI application to test.", " --request-timeout INTEGER RANGE", " Timeout in milliseconds for network requests", " during the test run. [x>=1]", " --request-tls-verify TEXT Controls whether Schemathesis verifies the", " server's TLS certificate. You can also pass", " the path to a CA_BUNDLE file for private", " certs. [default: true]", " --request-cert PATH File path of unencrypted client certificate", " for authentication. The certificate can be", " bundled with a private key (e.g. 
PEM) or the", " private key can be provided with the", " --request-cert-key argument.", " --request-cert-key PATH File path of the private key of the client", " certificate.", " --junit-xml FILENAME Create junit-xml style report file at given", " path.", " --debug-output-file FILENAME Save debug output as JSON lines in the given", " file.", " --show-errors-tracebacks Show full tracebacks for internal errors.", " [default: False]", " --code-sample-style [python|curl]", " Controls the style of code samples for failure", " reproduction.", " --store-network-log FILENAME Store requests and responses into a file.", " --fixups [fast_api|all] Install specified compatibility fixups.", " --stateful [none|links] Utilize stateful testing capabilities.", " --stateful-recursion-limit INTEGER RANGE", " Limit recursion depth for stateful testing.", " [default: 5; 1<=x<=100]", " --force-schema-version [20|30] Force Schemathesis to parse the input schema", " with the specified spec version.", " --no-color Disable ANSI color escape codes.", " --schemathesis-io-token TEXT Schemathesis.io authentication token.", " --schemathesis-io-url TEXT Schemathesis.io base URL.", " --hosts-file FILE Path to a file to store the Schemathesis.io", " auth configuration.", " -v, --verbosity Reduce verbosity of error output.", " -h, --help Show this message and exit.", ] SCHEMA_URI = "https://example.schemathesis.io/openapi.json" @pytest.mark.parametrize( "args, expected", ( ([], {}), (["--exitfirst"], {"exit_first": True}), (["--workers=2"], {"workers_num": 2}), (["--hypothesis-seed=123"], {"seed": 123}), ( [ "--hypothesis-deadline=1000", "--hypothesis-derandomize", "--hypothesis-max-examples=1000", "--hypothesis-phases=explicit,generate", "--hypothesis-report-multiple-bugs=0", "--hypothesis-suppress-health-check=too_slow,filter_too_much", "--hypothesis-verbosity=normal", ], { "hypothesis_settings": hypothesis.settings( deadline=1000, derandomize=True, max_examples=1000, phases=[Phase.explicit, Phase.generate], report_multiple_bugs=False, suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much], verbosity=Verbosity.normal, ) }, ), (["--hypothesis-deadline=None"], {"hypothesis_settings": hypothesis.settings(deadline=None)}), (["--max-response-time=10"], {"max_response_time": 10}), ), ) def test_from_schema_arguments(cli, mocker, swagger_20, args, expected): mocker.patch("schemathesis.cli.load_schema", return_value=swagger_20) execute = mocker.patch("schemathesis.runner.from_schema", autospec=True) result = cli.run(SCHEMA_URI, *args) expected = { "checks": DEFAULT_CHECKS, "targets": DEFAULT_TARGETS, "workers_num": 1, "exit_first": False, "dry_run": False, "stateful": Stateful.links, "stateful_recursion_limit": 5, "auth": None, "auth_type": "basic", "headers": {}, "request_timeout": DEFAULT_RESPONSE_TIMEOUT, "request_tls_verify": True, "request_cert": None, "store_interactions": False, "seed": None, "max_response_time": None, **expected, } hypothesis_settings = expected.pop("hypothesis_settings", None) call_kwargs = execute.call_args[1] executed_hypothesis_settings = call_kwargs.pop("hypothesis_settings", None) if hypothesis_settings is not None: # Compare non-default Hypothesis settings as `hypothesis.settings` can't be compared assert executed_hypothesis_settings.show_changed() == hypothesis_settings.show_changed() assert call_kwargs == expected @pytest.mark.parametrize( "args, expected", ( (["--auth=test:test"], {"auth": ("test", "test"), "auth_type": "basic"}), (["--auth=test:test", "--auth-type=digest"], 
{"auth": ("test", "test"), "auth_type": "digest"}), (["--auth=test:test", "--auth-type=DIGEST"], {"auth": ("test", "test"), "auth_type": "digest"}), (["--header=Authorization:Bearer 123"], {"headers": {"Authorization": "Bearer 123"}}), (["--header=Authorization: Bearer 123 "], {"headers": {"Authorization": "Bearer 123 "}}), (["--method=POST", "--method", "GET"], {"method": ("POST", "GET")}), (["--method=POST", "--auth=test:test"], {"auth": ("test", "test"), "auth_type": "basic", "method": ("POST",)}), (["--endpoint=users"], {"endpoint": ("users",)}), (["--tag=foo"], {"tag": ("foo",)}), (["--operation-id=getUser"], {"operation_id": ("getUser",)}), (["--base-url=https://example.com/api/v1test"], {"base_url": "https://example.com/api/v1test"}), ), ) def test_load_schema_arguments(cli, mocker, args, expected): mocker.patch("schemathesis.runner.SingleThreadRunner.execute", autospec=True) load_schema = mocker.patch("schemathesis.cli.load_schema", autospec=True) cli.run(SCHEMA_URI, *args) expected = LoaderConfig( SCHEMA_URI, **{ **{ "app": None, "base_url": None, "auth": None, "auth_type": "basic", "endpoint": None, "headers": {}, "data_generation_methods": [DataGenerationMethod.default()], "method": None, "tag": None, "operation_id": None, "validate_schema": False, "skip_deprecated_operations": False, "force_schema_version": None, "request_tls_verify": True, "request_cert": None, }, **expected, }, ) assert load_schema.call_args[0][0] == expected def test_load_schema_arguments_headers_to_loader_for_app(testdir, cli, mocker): from_wsgi = mocker.patch("schemathesis.specs.openapi.loaders.from_wsgi", autospec=True) module = testdir.make_importable_pyfile( location=""" from test.apps.openapi._flask import create_app app = create_app() """ ) cli.run("/schema.yaml", "--app", f"{module.purebasename}:app", "-H", "Authorization: Bearer 123") assert from_wsgi.call_args[1]["headers"]["Authorization"] == "Bearer 123" def test_all_checks(cli, mocker, swagger_20): mocker.patch("schemathesis.cli.load_schema", return_value=swagger_20) execute = mocker.patch("schemathesis.runner.from_schema", autospec=True) result = cli.run(SCHEMA_URI, "--checks=all") assert execute.call_args[1]["checks"] == ALL_CHECKS @pytest.mark.operations() def test_hypothesis_parameters(cli, schema_url): # When Hypothesis options are passed via command line result = cli.run( schema_url, "--hypothesis-deadline=1000", "--hypothesis-derandomize", "--hypothesis-max-examples=1000", "--hypothesis-phases=explicit,generate", "--hypothesis-report-multiple-bugs=0", "--hypothesis-suppress-health-check=too_slow,filter_too_much", "--hypothesis-verbosity=normal", ) # Then they should be correctly converted into arguments accepted by `hypothesis.settings` # Parameters are validated in `hypothesis.settings` assert result.exit_code == ExitCode.OK, result.stdout @pytest.mark.operations("success") @pytest.mark.parametrize("workers", (1, 2)) def test_cli_run_output_success(cli, cli_args, workers): result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.OK, result.stdout lines = result.stdout.split("\n") assert lines[7] == f"Workers: {workers}" if workers == 1: assert lines[10].startswith("GET /api/success .") else: assert lines[10] == "." 
assert " HYPOTHESIS OUTPUT " not in result.stdout assert " SUMMARY " in result.stdout lines = result.stdout.strip().split("\n") last_line = lines[-1] assert "== 1 passed in " in last_line # And the running time is a small positive number time = float(last_line.split(" ")[-2].replace("s", "")) assert 0 <= time < 5 @pytest.mark.parametrize("workers", (1, 2)) def test_cli_run_output_with_errors(cli, cli_args, workers): result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert " HYPOTHESIS OUTPUT " not in result.stdout assert " SUMMARY " in result.stdout lines = result.stdout.strip().split("\n") assert "1. Received a response with 5xx status code: 500" in lines assert "Performed checks:" in lines assert " not_a_server_error 1 / 3 passed FAILED " in lines assert "== 1 passed, 1 failed in " in lines[-1] @pytest.mark.operations("failure") @pytest.mark.parametrize("workers", (1, 2)) def test_cli_run_only_failure(cli, cli_args, app_type, workers): result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert " HYPOTHESIS OUTPUT " not in result.stdout assert " SUMMARY " in result.stdout lines = result.stdout.strip().split("\n") if app_type == "real": assert "Response payload: `500: Internal Server Error`" in lines else: assert "<h1>Internal Server Error</h1>" in lines assert " not_a_server_error 0 / 2 passed FAILED " in lines assert "== 1 failed in " in lines[-1] @pytest.mark.operations("upload_file") def test_cli_binary_body(cli, schema_url, hypothesis_max_examples): result = cli.run( schema_url, "--hypothesis-suppress-health-check=filter_too_much", f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.OK, result.stdout assert " HYPOTHESIS OUTPUT " not in result.stdout @pytest.mark.operations() @pytest.mark.parametrize("workers", (1, 2)) def test_cli_run_output_empty(cli, cli_args, workers): result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.OK, result.stdout assert " HYPOTHESIS OUTPUT " not in result.stdout assert " SUMMARY " in result.stdout lines = result.stdout.strip().split("\n") assert "No checks were performed." 
in lines assert "= Empty test suite =" in lines[-1] @pytest.mark.operations() @pytest.mark.parametrize("workers", (1, 2)) def test_cli_run_changed_base_url(cli, server, cli_args, workers): # When the CLI receives custom base URL base_url = f"http://127.0.0.1:{server['port']}/api" result = cli.run(*cli_args, "--base-url", base_url, f"--workers={workers}") # Then the base URL should be correctly displayed in the CLI output lines = result.stdout.strip().split("\n") assert lines[5] == f"Base URL: {base_url}" @pytest.mark.parametrize( "url, message", ( ("/doesnt_exist", "Schema was not found at http://127.0.0.1"), ("/failure", "Failed to load schema, code 500 was returned from http://127.0.0.1"), ), ) @pytest.mark.operations("failure") @pytest.mark.parametrize("workers", (1, 2)) def test_execute_missing_schema(cli, openapi3_base_url, url, message, workers): result = cli.run(f"{openapi3_base_url}{url}", f"--workers={workers}") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert message in result.stdout @pytest.mark.operations("success", "slow") @pytest.mark.parametrize("workers", (1, 2)) def test_hypothesis_failed_event(cli, cli_args, workers): # When the Hypothesis deadline option is set manually, and it is smaller than the response time result = cli.run(*cli_args, "--hypothesis-deadline=20", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And the given operation should be displayed as an error lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("GET /api/slow E") else: # It could be in any sequence, because of multiple threads assert lines[10].split("\n")[0] in ("E.", ".E", "EE") # empty line after all tests progress output assert lines[11] == "" # And the proper error message should be displayed assert "DeadlineExceeded: API response time is too slow! " in result.stdout assert "which exceeds the deadline of 20.00ms" in result.stdout # And the CLI should not suggest showing full tracebacks to the user assert "Add this option to your command line parameters to see full tracebacks" not in result.stdout @pytest.mark.operations("success", "slow") @pytest.mark.parametrize("workers", (1, 2)) def test_connection_timeout(cli, server, schema_url, workers): # When connection timeout is specified in the CLI and the request fails because of it result = cli.run(schema_url, "--request-timeout=80", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And the given operation should be displayed as a failure lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("GET /api/slow F") assert lines[11].startswith("GET /api/success .") else: # It could be in any sequence, because of multiple threads assert lines[10].split("\n")[0] in ("F.", ".F") # And the proper error message should be displayed assert "1. 
Response timed out after 80.00ms" in result.stdout


@pytest.mark.operations("success", "slow")
@pytest.mark.parametrize("workers", (1, 2))
def test_default_hypothesis_settings(cli, cli_args, workers):
    # When there is a slow operation that is faster than 15s
    result = cli.run(*cli_args, f"--workers={workers}")
    # Then the tests should pass because of the default 15s deadline
    assert result.exit_code == ExitCode.OK, result.stdout
    lines = result.stdout.split("\n")
    if workers == 1:
        assert lines[10].startswith("GET /api/slow .")
        assert lines[11].startswith("GET /api/success .")
    else:
        # It could be in any sequence because of multiple threads
        assert lines[10] == ".."


@pytest.mark.operations("failure")
@pytest.mark.parametrize("workers", (1, 2))
def test_seed(cli, cli_args, workers):
    # When there is a failure
    result = cli.run(*cli_args, "--hypothesis-seed=456", f"--workers={workers}")
    # Then the tests should fail and the RNG seed should be displayed
    assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout
    assert "Or add this option to your command line parameters: --hypothesis-seed=456" in result.stdout.split("\n")


@pytest.mark.operations("unsatisfiable")
@pytest.mark.parametrize("workers", (1, 2))
def test_unsatisfiable(cli, cli_args, workers):
    # When the app's schema contains parameters that can't be generated
    # For example, if it contains a contradiction in the parameters definition - requires to be integer AND string
    # at the same time
    result = cli.run(*cli_args, f"--workers={workers}")
    # Then the whole Schemathesis run should fail
    assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout
    # And the standard Hypothesis error should not appear in the output
    assert "You can add @seed" not in result.stdout
    # And this operation should be marked as errored in the progress line
    lines = result.stdout.split("\n")
    if workers == 1:
        assert lines[10].startswith("POST /api/unsatisfiable E")
    else:
        assert lines[10] == "E"
    # And a clearer error message is displayed instead of the Hypothesis one
    lines = result.stdout.split("\n")
    assert "hypothesis.errors.Unsatisfiable: Unable to satisfy schema parameters for this API operation" in lines


@pytest.mark.operations("flaky")
@pytest.mark.parametrize("workers", (1, 2))
def test_flaky(cli, cli_args, workers):
    # When the operation fails / succeeds randomly
    # Derandomize is needed for reproducible test results
    result = cli.run(*cli_args, "--hypothesis-derandomize", f"--workers={workers}")
    # Then the whole Schemathesis run should fail
    assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout
    # And the standard Hypothesis error should not appear in the output
    assert "Failed to reproduce exception. Expected:" not in result.stdout
    # And this operation should be marked as errored in the progress line
    lines = result.stdout.split("\n")
    if workers == 1:
        assert lines[10].startswith("GET /api/flaky E")
    else:
        assert lines[10] == "E"
    # And it should be displayed only once in the "ERRORS" section
    assert "= ERRORS =" in result.stdout
    assert "_ GET /api/flaky [P] _" in result.stdout
    # And it should not go into the "FAILURES" section
    assert "= FAILURES =" not in result.stdout
    # And a clearer error message is displayed instead of the Hypothesis one
    lines = result.stdout.split("\n")
    assert "hypothesis.errors.Flaky: Tests on this API operation produce unreliable results: " in lines
    assert "Falsified on the first call but did not on a subsequent one" in lines
    # And the example is displayed
    assert "Query : {'id': 0}" in lines


@pytest.mark.operations("invalid")
@pytest.mark.parametrize("workers", (1,))
def test_invalid_operation(cli, cli_args, workers):
    # When the app's schema contains errors
    # For example, if its type is "int" but should be "integer"
    # And schema validation is disabled
    result = cli.run(*cli_args, f"--workers={workers}", "--validate-schema=false")
    # Then the whole Schemathesis run should fail
    assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout
    # And the standard Hypothesis error should not appear in the output
    assert "You can add @seed" not in result.stdout
    # And this operation should be marked as errored in the progress line
    lines = result.stdout.split("\n")
    if workers == 1:
        assert lines[10].startswith("POST /api/invalid E")
    else:
        assert lines[10] == "E"
    assert " POST /api/invalid " in lines[13]
    # There shouldn't be a section end immediately after the section start - there should be some error text:
    # An internal error happened during a test run
    # Error: AssertionError
    assert not lines[14].startswith("=")


@pytest.mark.operations("invalid")
def test_invalid_operation_suggestion(cli, cli_args):
    # When the app's schema contains errors
    result = cli.run(*cli_args, "--validate-schema=true")
    # Then the whole Schemathesis run should fail
    assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout
    # And there should be a suggestion to disable schema validation
    expected = """You can disable input schema validation with --validate-schema=false command-line option
In this case, Schemathesis cannot guarantee proper behavior during the test run
"""
    assert expected in result.stdout


@pytest.mark.operations("invalid")
def test_invalid_operation_suggestion_disabled(cli, cli_args):
    # When the app's schema contains errors
    # And schema validation is disabled
    result = cli.run(*cli_args, "--validate-schema=false")
    # Then the whole Schemathesis run should fail
    assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout
    # And there should be no suggestion
    assert "You can disable input schema validation" not in result.stdout


@pytest.mark.operations("teapot")
@pytest.mark.parametrize("workers", (1, 2))
def test_status_code_conformance(cli, cli_args, workers):
    # When the operation returns a status code that is not listed in "responses"
    # And "status_code_conformance" is specified
    result = cli.run(*cli_args, "-c", "status_code_conformance", f"--workers={workers}")
    # Then the whole Schemathesis run should fail
    assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout
    # And this operation should be marked as failed in the progress line
    lines = result.stdout.split("\n")
    if workers == 1:
        assert lines[10].startswith("POST /api/teapot F")
    else:
        assert lines[10] == "F"
    assert "status_code_conformance 0 / 2 passed FAILED"
in result.stdout lines = result.stdout.split("\n") assert "1. Received a response with a status code, which is not defined in the schema: 418" in lines assert lines[16].strip() == "Declared status codes: 200" @pytest.mark.operations("headers") def test_headers_conformance_invalid(cli, cli_args): result = cli.run(*cli_args, "-c", "response_headers_conformance") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.split("\n") assert "1. Received a response with missing headers: X-Custom-Header" in lines @pytest.mark.operations("headers") def test_headers_conformance_valid(cli, cli_args): result = cli.run(*cli_args, "-c", "response_headers_conformance", "-H", "X-Custom-Header: bla") assert result.exit_code == ExitCode.OK, result.stdout lines = result.stdout.split("\n") assert "1. Received a response with missing headers: X-Custom-Header" not in lines @pytest.mark.operations("multiple_failures") def test_multiple_failures_single_check(cli, schema_url): result = cli.run(schema_url, "--hypothesis-seed=1", "--hypothesis-derandomize") assert "= HYPOTHESIS OUTPUT =" not in result.stdout assert "Hypothesis found 2 distinct failures" not in result.stdout lines = result.stdout.strip().split("\n") assert "1. Received a response with 5xx status code: 500" in lines assert "2. Received a response with 5xx status code: 504" in lines assert "1 failed in " in lines[-1] @pytest.mark.operations("multiple_failures") def test_multiple_failures_different_check(cli, schema_url): result = cli.run( schema_url, "-c", "status_code_conformance", "-c", "not_a_server_error", "--hypothesis-derandomize", "--hypothesis-seed=1", ) assert "= HYPOTHESIS OUTPUT =" not in result.stdout lines = result.stdout.strip().split("\n") assert "1. Received a response with a status code, which is not defined in the schema: 500" in lines assert "2. Received a response with 5xx status code: 500" in lines assert "3. Received a response with a status code, which is not defined in the schema: 504" in lines assert "4. 
Received a response with 5xx status code: 504" in lines assert "1 failed in " in lines[-1] @pytest.mark.parametrize("workers", (1, 2)) def test_connection_error(cli, schema_url, workers): # When the given base_url is unreachable result = cli.run(schema_url, "--base-url=http://127.0.0.1:1/api", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And all collected API operations should be marked as errored lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("GET /api/failure E") assert lines[11].startswith("GET /api/success E") else: assert lines[10] == "EE" # And errors section title should be displayed assert "= ERRORS =" in result.stdout # And all API operations should be mentioned in this section as subsections assert "_ GET /api/success [P] _" in result.stdout assert "_ GET /api/failure [P] _" in result.stdout # And the proper error messages should be displayed for each operation assert "Max retries exceeded with url: /api/success" in result.stdout assert "Max retries exceeded with url: /api/failure" in result.stdout @pytest.mark.parametrize("workers", (1, 2)) def test_schema_not_available(cli, workers): # When the given schema is unreachable result = cli.run("http://127.0.0.1:1/schema.yaml", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And error message is displayed lines = result.stdout.split("\n") assert lines[0] == "Failed to load schema from http://127.0.0.1:1/schema.yaml" assert lines[1].startswith( "Error: requests.exceptions.ConnectionError: HTTPConnectionPool(host='127.0.0.1', port=1): " "Max retries exceeded with url: /schema.yaml" ) def test_schema_not_available_wsgi(cli, loadable_flask_app): # When the given schema is unreachable result = cli.run("unknown.yaml", f"--app={loadable_flask_app}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And error message is displayed lines = result.stdout.split("\n") assert lines[0] == "Schema was not found at unknown.yaml" @pytest.mark.operations("custom_format") def test_pre_run_hook_valid(testdir, cli, schema_url, app): # When `--pre-run` hook is passed to the CLI call module = testdir.make_importable_pyfile( hook=""" import string import schemathesis from hypothesis import strategies as st schemathesis.register_string_format( "digits", st.text( min_size=1, alphabet=st.characters( whitelist_characters=string.digits, whitelist_categories=() ) ) ) """ ) result = cli.main( "--pre-run", module.purebasename, "run", "--hypothesis-suppress-health-check=filter_too_much", schema_url ) # Then CLI should run successfully assert result.exit_code == ExitCode.OK, result.stdout # And all registered new string format should produce digits as expected assert all(request.query["id"].isdigit() for request in app["incoming_requests"]) def test_pre_run_hook_invalid(testdir, cli): # When `--pre-run` hook is passed to the CLI call # And its importing causes an exception module = testdir.make_importable_pyfile(hook="1 / 0") result = cli.main("--pre-run", module.purebasename, "run", "http://127.0.0.1:1") # Then CLI run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And a helpful message should be displayed in the output lines = result.stdout.strip().split("\n") assert lines[0] == "An exception happened during the hook loading:" assert lines[7] == "ZeroDivisionError: 
division by zero" assert lines[9] == "Aborted!" def test_pre_run_hook_module_not_found(testdir, cli): testdir.makepyfile(hook="1 / 0") result = cli.main("--pre-run", "hook", "run", "http://127.0.0.1:1") assert os.getcwd() in sys.path # Then CLI run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "ModuleNotFoundError" not in result.stdout lines = result.stdout.strip().split("\n") assert lines[0] == "An exception happened during the hook loading:" assert lines[7] == "ZeroDivisionError: division by zero" assert lines[9] == "Aborted!" @pytest.mark.usefixtures("reset_hooks") def test_conditional_checks(testdir, cli, hypothesis_max_examples, schema_url): module = testdir.make_importable_pyfile( hook=""" import schemathesis import click @schemathesis.register_check def conditional_check(response, case): # skip this check return True """ ) result = cli.main( "--pre-run", module.purebasename, "run", "-c", "conditional_check", schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.OK # One additional case created for two API operations - /api/failure and /api/success. assert "No checks were performed." in result.stdout @pytest.mark.usefixtures("reset_hooks") def test_add_case(testdir, cli, hypothesis_max_examples, schema_url): module = testdir.make_importable_pyfile( hook=""" import schemathesis import click @schemathesis.hooks.register def add_case(context, case, response): if not case.headers: case.headers = {} case.headers["copy"] = "this is a copied case" return case @schemathesis.register_check def add_case_check(response, case): if case.headers and case.headers.get("copy") == "this is a copied case": # we will look for this output click.echo("The case was added!") """ ) result = cli.main( "--pre-run", module.purebasename, "run", "-c", "add_case_check", schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.OK # One additional case created for two API operations - /api/failure and /api/success. assert result.stdout.count("The case was added!") == 2 @pytest.mark.usefixtures("reset_hooks") def test_add_case_returns_none(testdir, cli, hypothesis_max_examples, schema_url): """Tests that no additional test case created when the add_case hook returns None.""" module = testdir.make_importable_pyfile( hook=""" import schemathesis import click @schemathesis.hooks.register def add_case(context, case, response): return None @schemathesis.register_check def add_case_check(response, case): click.echo("Validating case.") """ ) result = cli.main( "--pre-run", module.purebasename, "run", "-c", "add_case_check", schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.OK # with --hypothesis-max-examples=1 and 2 API operations, only two cases should be created and validated. # If the count is greater than 2, additional test cases should not have been created but were created. 
assert result.stdout.count("Validating case.") == 2 @pytest.mark.usefixtures("reset_hooks") def test_multiple_add_case_hooks(testdir, cli, hypothesis_max_examples, schema_url): """add_case hooks that mutate the case in place should not affect other cases.""" module = testdir.make_importable_pyfile( hook=""" import schemathesis import click @schemathesis.hooks.register("add_case") def add_first_header(context, case, response): if not case.headers: case.headers = {} case.headers["first"] = "first header" return case @schemathesis.hooks.register("add_case") def add_second_header(context, case, response): if not case.headers: case.headers = {} case.headers["second"] = "second header" return case @schemathesis.register_check def add_case_check(response, case): if case.headers and case.headers.get("first") == "first header": # we will look for this output click.echo("First case added!") if case.headers and case.headers.get("second") == "second header": # we will look for this output click.echo("Second case added!") """ ) result = cli.main( "--pre-run", module.purebasename, "run", "-c", "add_case_check", schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.OK # Each header should only be duplicated once for each API operation - /api/failure and /api/success. assert result.stdout.count("First case added!") == 2 assert result.stdout.count("Second case added!") == 2 @pytest.mark.usefixtures("reset_hooks") def test_add_case_output(testdir, cli, hypothesis_max_examples, schema_url): module = testdir.make_importable_pyfile( hook=""" import schemathesis import click @schemathesis.hooks.register("add_case") def add_first_header(context, case, response): if not case.headers: case.headers = {} case.headers["first"] = "first header" return case @schemathesis.hooks.register("add_case") def add_second_header(context, case, response): if not case.headers: case.headers = {} case.headers["second"] = "second header" return case @schemathesis.register_check def add_case_check(response, case): if ( case.headers and ( case.headers.get("second") == "second header" ) ): assert False, "failing cases from second add_case hook" """ ) result = cli.main( "--pre-run", module.purebasename, "run", "-c", "add_case_check", schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", ) assert result.exit_code == ExitCode.TESTS_FAILED assert result.stdout.count("failing cases from second add_case hook") == 2 add_case_check_line = next( filter(lambda line: line.strip().startswith("add_case_check"), result.stdout.split("\n")) ) assert "8 / 12" in add_case_check_line @pytest.fixture def reset_hooks(): yield unregister_all() reset_checks() @pytest.fixture( params=[ ('AssertionError("Custom check failed!")', "1. Custom check failed!"), ("AssertionError", "1. 
Check 'new_check' failed"), ] ) def new_check(request, testdir, cli): exception, message = request.param module = testdir.make_importable_pyfile( hook=f""" import schemathesis @schemathesis.register_check def new_check(response, result): raise {exception} """ ) yield module, message reset_checks() # To verify that "new_check" is unregistered result = cli.run("--help") lines = result.stdout.splitlines() assert ( " -c, --checks [not_a_server_error|status_code_conformance|content_type_conformance|" "response_headers_conformance|response_schema_conformance|all]" in lines ) @pytest.mark.operations("success") def test_register_check(new_check, cli, schema_url): new_check, message = new_check # When `--pre-run` hook is passed to the CLI call # And it contains registering a new check, which always fails for the testing purposes result = cli.main("--pre-run", new_check.purebasename, "run", "-c", "new_check", schema_url) # Then CLI run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And a message from the new check should be displayed lines = result.stdout.strip().split("\n") assert lines[14] == message def assert_threaded_executor_interruption(lines, expected, optional_interrupt=False): # It is possible to have a case when first call without an error will start processing # But after, another thread will have interruption and will push this event before the # first thread will finish. Race condition: "" is for this case and "." for the other # way around # The app under test was killed ungracefully and since we run it in a child or the main thread # its output might occur in the captured stdout. ignored_exception = "Exception ignored in: " in lines[10] assert lines[10] in expected or ignored_exception, lines if not optional_interrupt: assert "!! KeyboardInterrupt !!" in lines[11], lines assert any("=== SUMMARY ===" in line for line in lines[10:]) @pytest.mark.parametrize("workers", (1, 2)) @pytest.mark.filterwarnings("ignore:Exception in thread") def test_keyboard_interrupt(cli, cli_args, base_url, mocker, flask_app, swagger_20, workers): # When a Schemathesis run in interrupted by keyboard or via SIGINT operation = APIOperation("/success", "GET", {}, swagger_20, base_url=base_url) if len(cli_args) == 2: operation.app = flask_app original = Case(operation).call_wsgi else: original = Case(operation).call counter = 0 def mocked(*args, **kwargs): nonlocal counter counter += 1 if counter > 1: # For threaded case it emulates SIGINT for the worker thread raise KeyboardInterrupt return original(*args, **kwargs) if len(cli_args) == 2: mocker.patch("schemathesis.Case.call_wsgi", wraps=mocked) else: mocker.patch("schemathesis.Case.call", wraps=mocked) result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.OK, result.stdout # Then execution stops, and a message about interruption is displayed lines = result.stdout.strip().split("\n") # And summary is still displayed in the end of the output if workers == 1: assert lines[10].startswith("GET /api/failure .") assert lines[10].endswith("[ 50%]") assert lines[11] == "GET /api/success " assert "!! KeyboardInterrupt !!" 
in lines[12] assert "== SUMMARY ==" in lines[14] else: assert_threaded_executor_interruption(lines, ("", ".")) @pytest.mark.filterwarnings("ignore:Exception in thread") def test_keyboard_interrupt_threaded(cli, cli_args, mocker): # When a Schemathesis run is interrupted by the keyboard or via SIGINT original = time.sleep counter = 0 def mocked(*args, **kwargs): nonlocal counter counter += 1 if counter > 1: raise KeyboardInterrupt return original(*args, **kwargs) mocker.patch("schemathesis.runner.impl.threadpool.time.sleep", autospec=True, wraps=mocked) result = cli.run(*cli_args, "--workers=2", "--hypothesis-derandomize") # the exit status depends on what thread finished first assert result.exit_code in (ExitCode.OK, ExitCode.TESTS_FAILED), result.stdout # Then execution stops, and a message about interruption is displayed lines = result.stdout.strip().split("\n") # There are many scenarios possible, depends on how many tests will be executed before interruption # and in what order. it could be no tests at all, some of them or all of them. assert_threaded_executor_interruption(lines, ("F", ".", "F.", ".F", ""), True) @pytest.mark.operations("failure") @pytest.mark.parametrize("workers", (1, 2)) def test_hypothesis_output_capture(mocker, cli, cli_args, workers): mocker.patch("schemathesis.utils.IGNORED_PATTERNS", ()) result = cli.run(*cli_args, f"--workers={workers}") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "= HYPOTHESIS OUTPUT =" in result.stdout assert "Falsifying example" in result.stdout async def test_multiple_files_schema(openapi_2_app, testdir, cli, hypothesis_max_examples, openapi2_base_url): # When the schema contains references to other files uri = pathlib.Path(HERE).as_uri() + "/" schema = { "swagger": "2.0", "info": {"title": "Example API", "description": "An API to test Schemathesis", "version": "1.0.0"}, "host": "127.0.0.1:8888", "basePath": "/api", "schemes": ["http"], "produces": ["application/json"], "paths": { "/teapot": { "post": { "parameters": [ { # during the CLI run we have a different working directory, # so specifying an absolute uri "schema": {"$ref": urljoin(uri, "data/petstore_v2.yaml#/definitions/Pet")}, "in": "body", "name": "user", "required": True, } ], "responses": {"200": {"description": "OK"}}, } } }, } openapi_2_app["config"].update({"should_fail": True, "schema_data": schema}) schema_file = testdir.makefile(".yaml", schema=yaml.dump(schema)) # And file path is given to the CLI result = cli.run( str(schema_file), f"--base-url={openapi2_base_url}", f"--hypothesis-max-examples={hypothesis_max_examples or 5}", "--hypothesis-derandomize", ) # Then Schemathesis should resolve it and run successfully assert result.exit_code == ExitCode.OK, result.stdout # And all relevant requests should contain proper data for resolved references payload = await openapi_2_app["incoming_requests"][0].json() assert isinstance(payload["name"], str) assert isinstance(payload["photoUrls"], list) def test_wsgi_app(testdir, cli): module = testdir.make_importable_pyfile( location=""" from test.apps.openapi._flask import create_app app = create_app() """ ) result = cli.run("/schema.yaml", "--app", f"{module.purebasename}:app") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "1 passed, 1 failed in" in result.stdout def test_wsgi_app_exception(testdir, cli): module = testdir.make_importable_pyfile( location=""" from test.apps.openapi._flask import create_app 1 / 0 """ ) result = cli.run("/schema.yaml", "--app", 
f"{module.purebasename}:app", "--show-errors-tracebacks") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "Traceback (most recent call last):" in result.stdout assert "ZeroDivisionError: division by zero" in result.stdout def test_wsgi_app_missing(testdir, cli): module = testdir.make_importable_pyfile( location=""" from test.apps.openapi._flask import create_app """ ) result = cli.run("/schema.yaml", "--app", f"{module.purebasename}:app") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.strip().split("\n") assert "AttributeError: module 'location' has no attribute 'app'" in lines assert "Can not import application from the given module!" in lines def test_wsgi_app_internal_exception(testdir, cli): module = testdir.make_importable_pyfile( location=""" from test.apps.openapi._flask import create_app app = create_app() app.config["internal_exception"] = True """ ) result = cli.run("/schema.yaml", "--app", f"{module.purebasename}:app", "--hypothesis-derandomize") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.strip().split("\n") assert "== APPLICATION LOGS ==" in lines[48], result.stdout.strip() assert "ERROR in app: Exception on /api/success [GET]" in lines[50] assert lines[66] == "ZeroDivisionError: division by zero" @pytest.mark.parametrize("args", ((), ("--base-url",))) def test_aiohttp_app(openapi_version, request, cli, loadable_aiohttp_app, args): # When a URL is passed together with app if args: args += (request.getfixturevalue("base_url"),) result = cli.run("/schema.yaml", "--app", loadable_aiohttp_app, *args) # Then the schema should be loaded from that URL assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "1 passed, 1 failed in" in result.stdout def test_wsgi_app_remote_schema(cli, schema_url, loadable_flask_app): # When a URL is passed together with app result = cli.run(schema_url, "--app", loadable_flask_app) # Then the schema should be loaded from that URL assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout assert "1 passed, 1 failed in" in result.stdout def test_wsgi_app_path_schema(cli, loadable_flask_app): # When an existing path to schema is passed together with app result = cli.run(SIMPLE_PATH, "--app", loadable_flask_app) # Then the schema should be loaded from that path assert result.exit_code == ExitCode.OK, result.stdout assert "1 passed in" in result.stdout def test_multipart_upload(testdir, tmp_path, hypothesis_max_examples, openapi3_base_url, cli): cassette_path = tmp_path / "output.yaml" # When requestBody has a binary field or an array of binary items responses = {"200": {"description": "OK", "content": {"application/json": {"schema": {"type": "object"}}}}} schema = { "openapi": "3.0.0", "info": {"title": "Sample API", "description": "API description in Markdown.", "version": "1.0.0"}, "paths": { "/property": { "post": { "requestBody": { "required": True, "content": { "multipart/form-data": { "schema": { "type": "object", "properties": {"file": {"type": "string", "format": "binary"}}, "required": ["file"], } } }, }, "responses": responses, } }, "/array": { "post": { "requestBody": { "required": True, "content": { "multipart/form-data": { "schema": { "type": "object", "properties": { "files": {"type": "array", "items": {"type": "string", "format": "binary"}} }, "required": ["files"], } } }, }, "responses": responses, } }, }, "servers": [{"url": "https://api.example.com/{basePath}", "variables": {"basePath": {"default": "v1"}}}], } 
schema_file = testdir.makefile(".yaml", schema=yaml.dump(schema)) result = cli.run( str(schema_file), f"--base-url={openapi3_base_url}", f"--hypothesis-max-examples={hypothesis_max_examples or 5}", "--show-errors-tracebacks", "--hypothesis-derandomize", f"--store-network-log={cassette_path}", ) # Then it should be correctly sent to the server assert result.exit_code == ExitCode.OK, result.stdout assert "= ERRORS =" not in result.stdout with cassette_path.open() as fd: cassette = yaml.safe_load(fd) def decode(idx): request = cassette["http_interactions"][idx]["request"] if "body" not in request: return None return base64.b64decode(request["body"]["base64_string"]) first_decoded = decode(0) if first_decoded: assert b'Content-Disposition: form-data; name="files"; filename="files"\r\n' in first_decoded last_decoded = decode(-1) if last_decoded: assert b'Content-Disposition: form-data; name="file"; filename="file"\r\n' in last_decoded # NOTE, that the actual API operation is not checked in this test @pytest.mark.operations("form") def test_urlencoded_form(cli, cli_args): # When the API operation accepts application/x-www-form-urlencoded result = cli.run(*cli_args) # Then Schemathesis should generate appropriate payload assert result.exit_code == ExitCode.OK, result.stdout @pytest.mark.parametrize("workers", (1, 2)) @pytest.mark.operations("success") def test_targeted(mocker, cli, cli_args, workers): target = mocker.spy(hypothesis, "target") result = cli.run(*cli_args, f"--workers={workers}", "--target=response_time") assert result.exit_code == ExitCode.OK, result.stdout target.assert_called_with(mocker.ANY, label="response_time") def test_chained_internal_exception(testdir, cli, hypothesis_max_examples, openapi3_base_url): # When schema contains an error that causes an internal error in `jsonschema` raw_schema = { "openapi": "3.0.2", "info": {"title": "Test", "description": "Test", "version": "0.1.0"}, "paths": { "/users": { "get": { "responses": { # Response code should be a string 200: {"description": "OK", "content": {"application/json": {"schema": {"type": "object"}}}} } } } }, } schema_file = testdir.makefile(".yaml", schema=yaml.dump(raw_schema)) result = cli.run( str(schema_file), f"--base-url={openapi3_base_url}", f"--hypothesis-max-examples={hypothesis_max_examples or 1}", "--show-errors-tracebacks", "--validate-schema=true", ) assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.splitlines() assert "The above exception was the direct cause of the following exception:" in lines @pytest.mark.parametrize( "options, expected", ( ( ("--skip-deprecated-operations",), "Collected API operations: 1", ), ( (), "Collected API operations: 2", ), ), ) def test_skip_deprecated_operations(testdir, cli, openapi3_base_url, options, expected): # When there are some deprecated API operations definition = { "responses": {"200": {"description": "OK", "content": {"application/json": {"schema": {"type": "object"}}}}} } raw_schema = { "openapi": "3.0.2", "info": {"title": "Test", "description": "Test", "version": "0.1.0"}, "paths": { "/users": { "get": definition, "post": { "deprecated": True, **definition, }, } }, } schema_file = testdir.makefile(".yaml", schema=yaml.dump(raw_schema)) result = cli.run(str(schema_file), f"--base-url={openapi3_base_url}", "--hypothesis-max-examples=1", *options) assert result.exit_code == ExitCode.OK, result.stdout # Then only not deprecated API operations should be selected assert expected in result.stdout.splitlines() @pytest.fixture() def 
fast_api_fixup(): yield fixups.uninstall() @pytest.mark.parametrize("fixup", ("all", "fast_api")) def test_fast_api_fixup(testdir, cli, base_url, fast_api_schema, hypothesis_max_examples, fast_api_fixup, fixup): # When schema contains Draft 7 definitions as ones from FastAPI may contain schema_file = testdir.makefile(".yaml", schema=yaml.dump(fast_api_schema)) result = cli.run( str(schema_file), f"--base-url={base_url}", f"--hypothesis-max-examples={hypothesis_max_examples or 1}", f"--fixups={fixup}", ) assert result.exit_code == ExitCode.OK, result.stdout @pytest.mark.operations("success") def test_colon_in_headers(cli, schema_url, app): header = "X-FOO" value = "bar:spam" result = cli.run(schema_url, f"--header={header}:{value}") assert result.exit_code == ExitCode.OK assert app["incoming_requests"][0].headers[header] == value @pytest.mark.operations("create_user", "get_user", "update_user") def test_openapi_links(cli, cli_args, schema_url, hypothesis_max_examples): # When the schema contains Open API links or Swagger 2 extension for links # And these links are nested - API operations in these links contain links to another operations result = cli.run( *cli_args, f"--hypothesis-max-examples={hypothesis_max_examples or 2}", "--hypothesis-seed=1", "--hypothesis-derandomize", "--hypothesis-deadline=None", "--show-errors-tracebacks", ) lines = result.stdout.splitlines() # Note, it might fail if it uncovers the placed bug, which this version of stateful testing should not uncover # It is pretty rare and requires a high number for the `max_examples` setting. This version is staged for removal # Therefore it won't be fixed assert result.exit_code == ExitCode.OK, result.stdout # Then these links should be tested # And lines with the results of these tests should be indented assert lines[11].startswith(" -> GET /api/users/{user_id} .") # And percentage should be adjusted appropriately assert lines[11].endswith("[ 50%]") assert lines[12].startswith(" -> PATCH /api/users/{user_id} .") assert lines[12].endswith("[ 60%]") assert lines[13].startswith(" -> PATCH /api/users/{user_id} .") assert lines[13].endswith("[ 66%]") @pytest.mark.operations("create_user", "get_user", "update_user") def test_openapi_links_disabled(cli, schema_url, hypothesis_max_examples): # When the user disabled Open API links usage result = cli.run( schema_url, f"--hypothesis-max-examples={hypothesis_max_examples or 2}", "--hypothesis-seed=1", "--hypothesis-derandomize", "--hypothesis-deadline=None", "--show-errors-tracebacks", "--stateful=none", ) lines = result.stdout.splitlines() assert result.exit_code == ExitCode.OK, result.stdout # Then the links should not be traversed assert lines[10].startswith("POST /api/users/ .") assert lines[11].startswith("GET /api/users/{user_id} .") assert lines[12].startswith("PATCH /api/users/{user_id} .") @pytest.mark.parametrize("recursion_limit, expected", ((1, "....."), (5, "......"))) @pytest.mark.operations("create_user", "get_user", "update_user") def test_openapi_links_multiple_threads(cli, cli_args, schema_url, recursion_limit, hypothesis_max_examples, expected): # When the schema contains Open API links or Swagger 2 extension for links # And these links are nested - API operations in these links contain links to another operations result = cli.run( *cli_args, f"--hypothesis-max-examples={hypothesis_max_examples or 1}", "--hypothesis-seed=1", "--hypothesis-derandomize", "--hypothesis-deadline=None", "--hypothesis-suppress-health-check=too_slow,filter_too_much", 
"--show-errors-tracebacks", f"--stateful-recursion-limit={recursion_limit}", "--workers=2", ) lines = result.stdout.splitlines() assert result.exit_code == ExitCode.OK, result.stdout assert lines[10] == expected + "." if hypothesis_max_examples else expected def test_get_request_with_body(testdir, cli, base_url, hypothesis_max_examples, schema_with_get_payload): schema_file = testdir.makefile(".yaml", schema=yaml.dump(schema_with_get_payload)) result = cli.run( str(schema_file), f"--base-url={base_url}", f"--hypothesis-max-examples={hypothesis_max_examples or 1}", "--show-errors-tracebacks", "--validate-schema=true", ) assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.splitlines() assert "InvalidSchema: Body parameters are defined for GET request." in lines @pytest.mark.operations("slow") @pytest.mark.parametrize("workers", (1, 2)) def test_max_response_time_invalid(cli, server, schema_url, workers): # When maximum response time check is specified in the CLI and the request takes more time result = cli.run(schema_url, "--max-response-time=50", f"--workers={workers}") # Then the whole Schemathesis run should fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # And the given operation should be displayed as a failure lines = result.stdout.split("\n") if workers == 1: assert lines[10].startswith("GET /api/slow F") else: assert lines[10].startswith("F") # And the proper error message should be displayed assert "max_response_time 0 / 2 passed FAILED" in result.stdout assert "Response time exceeded the limit of 50 ms" in result.stdout @pytest.mark.operations("slow") def test_max_response_time_valid(cli, server, schema_url): # When maximum response time check is specified in the CLI and the request takes less time result = cli.run(schema_url, "--max-response-time=200") # Then no errors should occur assert result.exit_code == ExitCode.OK, result.stdout @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.parametrize("header", ("Authorization", "authorization")) @pytest.mark.operations() def test_auth_and_authorization_header_are_disallowed(cli, schema_url, header, openapi_version): # When ``--auth`` is passed together with ``--header`` that sets the ``Authorization`` header result = cli.run(schema_url, "--auth=test:test", f"--header={header}:token123") # Then it causes a validation error assert result.exit_code == ExitCode.INTERRUPTED assert ( "Invalid value: Passing `--auth` together with `--header` that sets `Authorization` is not allowed." 
in result.stdout ) @pytest.mark.parametrize("workers_num", (1, 2)) @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.operations("failure", "success") def test_exit_first(cli, schema_url, openapi_version, workers_num, mocker): # When the `--exit-first` CLI option is passed # And a failure occurs stop_worker = mocker.spy(threadpool, "stop_worker") result = cli.run(schema_url, "--exitfirst", "-w", str(workers_num)) # Then the tests fail assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout if workers_num == 1: lines = result.stdout.split("\n") # And the execution should stop on the first failure for idx, line in enumerate(lines): if line.startswith("GET /api/failure F"): assert line.endswith("[ 50%]") break else: pytest.fail("Line not found") # The "FAILURES" section goes after a new line, rather than continuing to the next operation next_line = lines[idx + 1] assert next_line == "" assert "FAILURES" in lines[idx + 2] else: stop_worker.assert_called() @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) def test_base_url_not_required_for_dry_run(testdir, cli, openapi_version, empty_open_api_3_schema): schema_file = testdir.makefile(".yaml", schema=yaml.dump(empty_open_api_3_schema)) result = cli.run(str(schema_file), "--dry-run") assert result.exit_code == ExitCode.OK, result.stdout def test_long_operation_output(testdir, empty_open_api_3_schema): # See GH-990 # When there is a narrow screen # And the API schema contains an operation with a long name empty_open_api_3_schema["paths"] = { f"/{'a' * 100}": { "get": { "responses": {"200": {"description": "OK"}}, } }, f"/{'a' * 10}": { "get": { "responses": {"200": {"description": "OK"}}, } }, } schema_file = testdir.makefile(".yaml", schema=yaml.dump(empty_open_api_3_schema)) result = testdir.run("schemathesis", "run", str(schema_file), "--dry-run") # Then this operation name should be truncated assert result.ret == ExitCode.OK assert "GET /aaaaaaaaaa . [ 50%]" in result.outlines assert "GET /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[...] . [100%]" in result.outlines def test_reserved_characters_in_operation_name(testdir, empty_open_api_3_schema): # See GH-992 # When an API operation name contains `:` empty_open_api_3_schema["paths"] = { "/foo:bar": { "get": { "responses": {"200": {"description": "OK"}}, } }, } schema_file = testdir.makefile(".yaml", schema=yaml.dump(empty_open_api_3_schema)) result = testdir.run("schemathesis", "run", str(schema_file), "--dry-run") # Then this operation name should be displayed with the leading `/` assert result.ret == ExitCode.OK assert "GET /foo:bar . 
[100%]" in result.outlines def test_error_during_example_generation(testdir, cli): # See GH-994 # When the API schema is in YAML # And contains an unquoted value, that is casted to boolean # And it is behind references # And there are examples of another parameter content = """ swagger: "2.0" basePath: / info: description: Test title: Test version: 1.0 parameters: Bar: in: body name: payload required: true schema: properties: name: example: test type: string type: object paths: /test: post: parameters: - $ref: "#/parameters/Bar" - in: query name: test type: string responses: "201": description: Ok definitions: Foo: properties: bar: example: foo type: string # Should be quoted on: example: true type: boolean type: object """ schema_file = testdir.makefile(".yaml", schema=content) result = cli.run(str(schema_file), "--dry-run", "--validate-schema=false") # Then the run should not be interrupted, but the run fails assert result.exit_code == ExitCode.TESTS_FAILED assert " The API schema contains non-string keys" in result.stdout def test_unsupported_regex(testdir, cli, empty_open_api_3_schema): def make_definition(min_items): return { "post": { "requestBody": { "required": True, "content": { "application/json": { "schema": { "type": "array", # Java-style regular expression "items": {"type": "string", "pattern": r"\p{Alpha}"}, "maxItems": 3, "minItems": min_items, } } }, }, "responses": {"200": {"description": "OK"}}, } } # When an operation uses an unsupported regex syntax empty_open_api_3_schema["paths"] = { # Can't generate anything "/foo": make_definition(min_items=1), # Can generate an empty array "/bar": make_definition(min_items=0), } schema_file = testdir.makefile(".yaml", schema=yaml.dump(empty_open_api_3_schema)) result = cli.run(str(schema_file), "--dry-run", "--hypothesis-max-examples=1") # Then if it is possible it should generate at least something assert "POST /bar ." 
in result.stdout # And if it is not, there should be an error with a descriptive message assert "POST /foo E" in result.stdout lines = result.stdout.splitlines() for idx, line in enumerate(lines): if "__ POST /foo [P] __" in line: break else: pytest.fail("Line not found") assert r"Got pattern='\\p{Alpha}', but this is not valid syntax for a Python regular expression" in lines[idx + 1] @pytest.mark.parametrize("extra", ("--auth='test:wrong'", "-H Authorization: Basic J3Rlc3Q6d3Jvbmcn")) @pytest.mark.operations("basic") def test_auth_override_on_protected_operation(cli, base_url, schema_url, extra): # See GH-792 # When the tested API operation has basic auth # And the auth is overridden (directly or via headers) result = cli.run(schema_url, "--checks=all", extra) # And there is an error during testing assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.splitlines() # Then the request representation in the output should have the overridden value assert ( lines[18] == f"Headers : {{'Authorization': 'Basic J3Rlc3Q6d3Jvbmcn', 'User-Agent': '{USER_AGENT}'," f" 'Accept-Encoding': 'gzip, deflate', 'Accept': '*/*', 'Connection': 'keep-alive'}}" ) # And the code sample as well assert ( lines[26] == f" curl -X GET -H 'Accept: */*' -H 'Accept-Encoding: gzip, deflate' -H 'Authorization: Basic J3Rlc3Q6d3Jvbmcn' -H 'Connection: keep-alive' -H 'User-Agent: {USER_AGENT}' {base_url}/basic" ) @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.operations("flaky") def test_explicit_headers_in_output_on_errors(cli, base_url, schema_url, openapi_version): # When there is a non-fatal error during testing (e.g. flakiness) # And custom headers were passed explicitly auth = "Basic J3Rlc3Q6d3Jvbmcn" result = cli.run(schema_url, "--checks=all", f"-H Authorization: {auth}") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout lines = result.stdout.splitlines() # Then the request representation in the output should have the overridden value assert lines[17] == f"Headers : {{'Authorization': '{auth}', 'User-Agent': '{USER_AGENT}'}}" # And the code sample as well assert lines[22].startswith( f" curl -X GET -H 'Authorization: {auth}' -H 'User-Agent: {USER_AGENT}' '{base_url}/flaky?id=0'" ) @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.operations("__all__") def test_debug_output(tmp_path, cli, schema_url, openapi_version, hypothesis_max_examples): # When the `--debug-output-file` option is passed debug_file = tmp_path / "debug.jsonl" cassette_path = tmp_path / "output.yaml" result = cli.run( schema_url, f"--debug-output-file={debug_file}", "--validate-schema=false", f"--hypothesis-max-examples={hypothesis_max_examples or 1}", f"--store-network-log={cassette_path}", ) assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # Then all underlying runner events should be stored as a JSONL file assert debug_file.exists() with debug_file.open(encoding="utf-8") as fd: lines = fd.readlines() for line in lines: json.loads(line) # And statuses are encoded as strings assert list(json.loads(lines[-1])["total"]["not_a_server_error"]) == ["success", "total", "failure"] @pytest.mark.operations("cp866") def test_response_payload_encoding(cli, cli_args): # See GH-1073 # When the "failed" response has non-UTF-8 encoding result = cli.run(*cli_args, "--checks=all") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # Then it should be displayed according to its actual encoding assert "Response payload: `Тест`" in
result.stdout.splitlines() @pytest.mark.operations("conformance") def test_response_schema_conformance_deduplication(cli, cli_args): # See GH-907 # When the "response_schema_conformance" check is present # And the app returns different error messages caused by the same validator result = cli.run(*cli_args, "--checks=response_schema_conformance") assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout # Then the errors should be deduplicated assert result.stdout.count("Response payload: ") == 1 @pytest.mark.parametrize("kind", ("env_var", "arg")) @pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),)) @pytest.mark.operations("success") def test_no_color(monkeypatch, cli, schema_url, kind): args = (schema_url,) if kind == "env_var": monkeypatch.setenv("NO_COLOR", "1") if kind == "arg": args += ("--no-color",) result = cli.run(*args, color=True) assert result.exit_code == ExitCode.OK, result.stdout assert "[1m" not in result.stdout @pytest.mark.parametrize("graphql_path", ("/graphql", "/foo")) def test_graphql_url(cli, graphql_url, graphql_path): # When the target API is GraphQL result = cli.run(graphql_url, "--hypothesis-max-examples=5") assert_graphql(result) def test_graphql_asgi(cli, loadable_graphql_fastapi_app, graphql_path): # When the target API is GraphQL result = cli.run(f"--app={loadable_graphql_fastapi_app}", "--hypothesis-max-examples=5", graphql_path) assert_graphql(result) def assert_graphql(result): assert result.exit_code == ExitCode.OK, result.stdout # Then it should be detected automatically assert "Specification version: GraphQL" in result.stdout assert "getBooks . " in result.stdout assert "getAuthors . " in result.stdout def assert_exit_code(event_stream, code): with pytest.raises(SystemExit) as exc: execute( event_stream, workers_num=1, show_errors_tracebacks=False, validate_schema=False, store_network_log=None, junit_xml=None, verbosity=0, code_sample_style=CodeSampleStyle.default(), debug_output_file=None, schemathesis_io_token=None, schemathesis_io_url=service.DEFAULT_URL, api_slug=None, ) assert exc.value.code == code def test_cli_execute(swagger_20, capsys): event_stream = from_schema(swagger_20).execute() for _ in event_stream: pass assert_exit_code(event_stream, 1) assert capsys.readouterr().out.strip() == "Unexpected error" def test_get_exit_code(swagger_20, capsys): event_stream = from_schema(swagger_20).execute() next(event_stream) event = next(event_stream) assert get_exit_code(event) == 1 @pytest.mark.parametrize("base_url", (None, "http://127.0.0.1/apiv2")) @pytest.mark.parametrize("location", ("path", "query", "header", "cookie")) def test_missing_content_and_schema(cli, base_url, tmp_path, testdir, empty_open_api_3_schema, location): debug_file = tmp_path / "debug.jsonl" # When an Open API 3 parameter is missing `schema` & `content` empty_open_api_3_schema["paths"] = { "/foo": {"get": {"parameters": [{"in": location, "name": "X-Foo", "required": True}]}} } schema_file = testdir.makefile(".json", schema=json.dumps(empty_open_api_3_schema)) args = [ str(schema_file), f"--debug-output-file={debug_file}", "--dry-run", "--validate-schema=false", "--hypothesis-max-examples=1", ] if base_url is not None: args.append(f"--base-url={base_url}") result = cli.run(*args) lines = result.stdout.split("\n") # Then the CLI should show that this API operation errored # And show the proper message under its "ERRORS" section if base_url is None: assert lines[10].startswith("GET /foo E") else: assert lines[10].startswith("GET /apiv2/foo E") assert "_ GET 
/apiv2/foo [P] _" in lines[13] assert ( lines[14] == f'InvalidSchema: Can not generate data for {location} parameter "X-Foo"! ' "It should have either `schema` or `content` keywords defined" ) # And emitted Before / After event pairs have the same correlation ids with debug_file.open(encoding="utf-8") as fd: events = [json.loads(line) for line in fd] assert events[1]["correlation_id"] == events[2]["correlation_id"] # And they should have the same "verbose_name" assert events[1]["verbose_name"] == events[2]["verbose_name"]
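The test above leans on two properties of the `--debug-output-file` stream that are easy to check in isolation: every line is a standalone JSON document, and the Before/After event pair for one operation shares a correlation id and verbose name. A minimal self-contained sketch of that pattern; the event payloads below are invented and model only the keys the assertions touch:

import json

sample_lines = [
    '{"event": "BeforeExecution", "correlation_id": "abc123", "verbose_name": "GET /foo"}',
    '{"event": "AfterExecution", "correlation_id": "abc123", "verbose_name": "GET /foo"}',
]
# Each line parses on its own (JSONL), and paired events line up by correlation id.
events = [json.loads(line) for line in sample_lines]
assert events[0]["correlation_id"] == events[1]["correlation_id"]
assert events[0]["verbose_name"] == events[1]["verbose_name"]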
import glob import shutil from pathlib import Path import src.calculate_energy_density as energy_func import src.calculate_h1 as h1_func import src.calculate_sfr as sfr_func import src.helper as helper import src.calculate_surface_density as surf_func import src.calculate_radio_sfr as radio_sfr def copy_to_out(config: dict): Path(config["out_directory"]).mkdir(parents=True, exist_ok=True) l = ["energy_density_combined", "energy_density_h1", "energy_density_h2", "h1_combined", "h2_surface_density_combined", "radio_sfr_combined", "sfr_combined", "surface_density_combined"] for file in l: shutil.copy(config["data_directory"] + "/" + file + ".pdf", config["out_directory"]) shutil.copy(config["data_directory"] + "/" + file + "_mean.pdf", config["out_directory"]) shutil.copy(config["data_directory"] + "/" + file + "_smooth.pdf", config["out_directory"]) for galaxy in config["galaxies"]: name = galaxy["name"] magnetic_dir = helper.get_magnetic_galaxy_dir(name, config["data_directory"]) sfr_dir = sfr_func.get_path_to_sfr_dir(name, config["data_directory"]) h1_dir = h1_func.get_path_to_h1_dir(name, config["data_directory"]) energy_dir = energy_func.get_path_to_energy_density_dir( name, config["data_directory"]) surf_dir = surf_func.get_path_to_surface_density_dir( name, config["data_directory"]) radio_sfr_dir = radio_sfr.get_path_to_radio_sfr_dir(name, config["data_directory"]) for files in [glob.glob(magnetic_dir + f"/*magnetic{"_non_thermal" if galaxy["use_thermal"] else ""}.pdf"), glob.glob(magnetic_dir + f"/*magnetic{"_non_thermal" if galaxy["use_thermal"] else ""}_overlay.pdf")]: for file in files: shutil.copy(file, config["out_directory"]) for files in [glob.glob(sfr_dir + "/*_pixel.pdf"), glob.glob(sfr_dir + "/*_overlay.pdf"), glob.glob(sfr_dir + "/*_pixel_smooth.pdf")]: for file in files: shutil.copy(file, config["out_directory"]) for files in [glob.glob(h1_dir + "/*_pixel.pdf"), glob.glob(h1_dir + "/*_overlay.pdf"), glob.glob(h1_dir + "/*_pixel_smooth.pdf")]: for file in files: shutil.copy(file, config["out_directory"]) for files in [glob.glob(energy_dir + "/*_pixel.pdf"), glob.glob(energy_dir + "/*_overlay.pdf"), glob.glob(energy_dir + "/*_pixel_smooth.pdf")]: for file in files: shutil.copy(file, config["out_directory"]) for files in [glob.glob(surf_dir + "/*_pixel.pdf"), glob.glob(surf_dir + "/*_overlay.pdf"), glob.glob(surf_dir + "/*_pixel_smooth.pdf")]: for file in files: shutil.copy(file, config["out_directory"]) for files in [glob.glob(radio_sfr_dir + "/*_pixel.pdf"), glob.glob(radio_sfr_dir + "/*_overlay.pdf"), glob.glob(radio_sfr_dir + "/*_pixel_smooth.pdf")]: for file in files: shutil.copy(file, config["out_directory"])
import glob import shutil from pathlib import Path import src.calculate_energy_density as energy_func import src.calculate_h1 as h1_func import src.calculate_sfr as sfr_func import src.helper as helper import src.calculate_surface_density as surf_func import src.calculate_radio_sfr as radio_sfr def copy_to_out(config: dict): Path(config["out_directory"]).mkdir(parents=True, exist_ok=True) l = ["energy_density_combined", "energy_density_h1", "energy_density_h2", "h1_combined", "h2_surface_density_combined", "radio_sfr_combined", "sfr_combined", "surface_density_combined"] for file in l: shutil.copy(config["data_directory"] + "/" + file + ".pdf", config["out_directory"]) shutil.copy(config["data_directory"] + "/" + file + "_mean.pdf", config["out_directory"]) shutil.copy(config["data_directory"] + "/" + file + "_smooth.pdf", config["out_directory"]) for galaxy in config["galaxies"]: name = galaxy["name"] magnetic_dir = helper.get_magnetic_galaxy_dir(name, config["data_directory"]) sfr_dir = sfr_func.get_path_to_sfr_dir(name, config["data_directory"]) h1_dir = h1_func.get_path_to_h1_dir(name, config["data_directory"]) energy_dir = energy_func.get_path_to_energy_density_dir( name, config["data_directory"]) surf_dir = surf_func.get_path_to_surface_density_dir( name, config["data_directory"]) radio_sfr_dir = radio_sfr.get_path_to_radio_sfr_dir(name, config["data_directory"]) for files in [glob.glob(magnetic_dir + f"/*magnetic{'_non_thermal' if galaxy['use_thermal'] else ''}.pdf"), glob.glob(magnetic_dir + f"/*magnetic{'_non_thermal' if galaxy['use_thermal'] else ''}_overlay.pdf")]: for file in files: shutil.copy(file, config["out_directory"]) for files in [glob.glob(sfr_dir + "/*_pixel.pdf"), glob.glob(sfr_dir + "/*_overlay.pdf"), glob.glob(sfr_dir + "/*_pixel_smooth.pdf")]: for file in files: shutil.copy(file, config["out_directory"]) for files in [glob.glob(h1_dir + "/*_pixel.pdf"), glob.glob(h1_dir + "/*_overlay.pdf"), glob.glob(h1_dir + "/*_pixel_smooth.pdf")]: for file in files: shutil.copy(file, config["out_directory"]) for files in [glob.glob(energy_dir + "/*_pixel.pdf"), glob.glob(energy_dir + "/*_overlay.pdf"), glob.glob(energy_dir + "/*_pixel_smooth.pdf")]: for file in files: shutil.copy(file, config["out_directory"]) for files in [glob.glob(surf_dir + "/*_pixel.pdf"), glob.glob(surf_dir + "/*_overlay.pdf"), glob.glob(surf_dir + "/*_pixel_smooth.pdf")]: for file in files: shutil.copy(file, config["out_directory"]) for files in [glob.glob(radio_sfr_dir + "/*_pixel.pdf"), glob.glob(radio_sfr_dir + "/*_overlay.pdf"), glob.glob(radio_sfr_dir + "/*_pixel_smooth.pdf")]: for file in files: shutil.copy(file, config["out_directory"])
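A hedged usage sketch for copy_to_out(): the keys mirror the ones the function reads ("data_directory", "out_directory", "galaxies"), while the paths and the galaxy entry are invented for illustration. The call itself is left commented out because it expects the generated PDFs to already exist on disk:

example_config = {
    "data_directory": "data",  # where the per-galaxy result PDFs were written
    "out_directory": "out",    # flat directory everything gets copied into
    "galaxies": [
        {"name": "n5194", "use_thermal": False},  # hypothetical galaxy entry
    ],
}
# copy_to_out(example_config)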
import logging import time from typing import List from spaceone.core.utils import * from spaceone.inventory.connector.aws_kinesis_data_stream_connector.schema.data import StreamDescription, Consumers from spaceone.inventory.connector.aws_kinesis_data_stream_connector.schema.resource import StreamResource, KDSResponse from spaceone.inventory.connector.aws_kinesis_data_stream_connector.schema.service_type import CLOUD_SERVICE_TYPES from spaceone.inventory.libs.connector import SchematicAWSConnector _LOGGER = logging.getLogger(__name__) class KinesisDataStreamConnector(SchematicAWSConnector): service_name = "kinesis" cloud_service_group = 'KinesisDataStream' cloud_service_type = 'DataStream' def get_resources(self): _LOGGER.debug("[get_resources] START: Kinesis Data Stream") resources = [] start_time = time.time() collect_resources = [ { "request_method": self.request_data, "resource": StreamResource, "response_schema": KDSResponse, } ] for cst in CLOUD_SERVICE_TYPES: resources.append(cst) for region_name in self.region_names: self.reset_region(region_name) for collect_resource in collect_resources: resources.extend( self.collect_data_by_region( self.service_name, region_name, collect_resource ) ) _LOGGER.debug(f'[get_resources] FINISHED: Kinesis Data Stream ({time.time() - start_time} sec)') return resources def request_data(self, region_name) -> List[StreamDescription]: paginator = self.client.get_paginator("list_streams") response_iterator = paginator.paginate( PaginationConfig={ "MaxItems": 10000, "PageSize": 50, } ) for data in response_iterator: for stream_name in data.get("StreamNames", []): try: stream_response = self.client.describe_stream(StreamName=stream_name) stream_info = stream_response.get("StreamDescription", {}) num_of_con, consumers = self.get_consumers(stream_info.get("StreamARN")) stream_info.update( { "stream_status_display": self.get_stream_status_display( stream_info.get("StreamStatus") ), "retention_period_days": self.get_retention_period_days( stream_info.get("RetentionPeriodHours") ), "retention_period_display": self.get_retention_period_display( stream_info.get("RetentionPeriodHours") ), "retention_period_display_hours": f"{stream_info.get("RetentionPeriodHours")} hours", "encryption_display": self.get_encryption_display( stream_info.get("EncryptionType") ), "shard_level_metrics_display": self.get_shard_level_metrics_display( stream_info.get("EnhancedMonitoring") ), "open_shards_num": self.get_open_shards_num( stream_info.get("Shards") ), "closed_shards_num": self.get_closed_shards_num( stream_info.get("Shards") ), "consumers_vo": { "num_of_consumers": num_of_con, "consumers": consumers, }, "tags": self.get_tags(stream_info.get("StreamName")), "account_id": self.account_id, } ) stream_vo = StreamDescription(stream_info, strict=False) yield { 'data': stream_vo, 'instance_size': float(stream_vo.open_shards_num), 'launched_at': self.datetime_to_iso8601(stream_vo.stream_creation_timestamp), 'name': stream_vo.stream_name, 'account': self.account_id } except Exception as e: resource_id = stream_name error_resource_response = self.generate_error(region_name, resource_id, e) yield {'data': error_resource_response} def get_tags(self, name): tag_response = self.client.list_tags_for_stream(StreamName=name) return tag_response.get("Tags", []) def get_consumers(self, arn): consumer_response = self.client.list_stream_consumers(StreamARN=arn) consumers_info = consumer_response.get("Consumers", []) consumers_num = len(consumers_info) consumers = [] for consumer_info in 
consumers_info: consumer_info.update( { "consumer_status_display": self.get_consumers_status_display( consumer_info.get("ConsumerStatus") ), } ) consumers.append(Consumers(consumer_info, strict=False)) return consumers_num, consumers @staticmethod def get_consumers_num(consumers): return len(consumers) @staticmethod def get_consumers_status_display(raw_status): return raw_status[0] + raw_status[1:].lower() @staticmethod def get_stream_status_display(raw_status): return raw_status[0] + raw_status[1:].lower() @staticmethod def get_retention_period_days(retention_period_hours): return int(retention_period_hours / 24) @staticmethod def get_retention_period_display(retention_period_hours): day = int(retention_period_hours / 24) hour = int(retention_period_hours % 24) day_postfix = f"{day} day" if day == 1 else ("" if not day else f"{day} days") hour_postfix = ( f" {hour} hour" if hour == 1 else ("" if not hour else f" {hour} hours") ) return day_postfix + hour_postfix @staticmethod def get_encryption_display(raw_encryption): return "Disabled" if raw_encryption == "NONE" else "Enabled" @staticmethod def get_shard_level_metrics_display(enhanced_monitoring): return ( ["Disabled"] if not enhanced_monitoring[0]["ShardLevelMetrics"] else enhanced_monitoring[0]["ShardLevelMetrics"] ) @staticmethod def get_open_shards_num(shards_list): return len( [ shard for shard in shards_list if shard.get("SequenceNumberRange", {}).get("EndingSequenceNumber") is None ] ) @staticmethod def get_closed_shards_num(shards_list): return len( [ shard for shard in shards_list if shard.get("SequenceNumberRange", {}).get("EndingSequenceNumber") is not None ] )
import logging import time from typing import List from spaceone.core.utils import * from spaceone.inventory.connector.aws_kinesis_data_stream_connector.schema.data import StreamDescription, Consumers from spaceone.inventory.connector.aws_kinesis_data_stream_connector.schema.resource import StreamResource, KDSResponse from spaceone.inventory.connector.aws_kinesis_data_stream_connector.schema.service_type import CLOUD_SERVICE_TYPES from spaceone.inventory.libs.connector import SchematicAWSConnector _LOGGER = logging.getLogger(__name__) class KinesisDataStreamConnector(SchematicAWSConnector): service_name = "kinesis" cloud_service_group = 'KinesisDataStream' cloud_service_type = 'DataStream' def get_resources(self): _LOGGER.debug("[get_resources] START: Kinesis Data Stream") resources = [] start_time = time.time() collect_resources = [ { "request_method": self.request_data, "resource": StreamResource, "response_schema": KDSResponse, } ] for cst in CLOUD_SERVICE_TYPES: resources.append(cst) for region_name in self.region_names: self.reset_region(region_name) for collect_resource in collect_resources: resources.extend( self.collect_data_by_region( self.service_name, region_name, collect_resource ) ) _LOGGER.debug(f'[get_resources] FINISHED: Kinesis Data Stream ({time.time() - start_time} sec)') return resources def request_data(self, region_name) -> List[StreamDescription]: paginator = self.client.get_paginator("list_streams") response_iterator = paginator.paginate( PaginationConfig={ "MaxItems": 10000, "PageSize": 50, } ) for data in response_iterator: for stream_name in data.get("StreamNames", []): try: stream_response = self.client.describe_stream(StreamName=stream_name) stream_info = stream_response.get("StreamDescription", {}) num_of_con, consumers = self.get_consumers(stream_info.get("StreamARN")) stream_info.update( { "stream_status_display": self.get_stream_status_display( stream_info.get("StreamStatus") ), "retention_period_days": self.get_retention_period_days( stream_info.get("RetentionPeriodHours") ), "retention_period_display": self.get_retention_period_display( stream_info.get("RetentionPeriodHours") ), "retention_period_display_hours": f"{stream_info.get('RetentionPeriodHours')} hours", "encryption_display": self.get_encryption_display( stream_info.get("EncryptionType") ), "shard_level_metrics_display": self.get_shard_level_metrics_display( stream_info.get("EnhancedMonitoring") ), "open_shards_num": self.get_open_shards_num( stream_info.get("Shards") ), "closed_shards_num": self.get_closed_shards_num( stream_info.get("Shards") ), "consumers_vo": { "num_of_consumers": num_of_con, "consumers": consumers, }, "tags": self.get_tags(stream_info.get("StreamName")), "account_id": self.account_id, } ) stream_vo = StreamDescription(stream_info, strict=False) yield { 'data': stream_vo, 'instance_size': float(stream_vo.open_shards_num), 'launched_at': self.datetime_to_iso8601(stream_vo.stream_creation_timestamp), 'name': stream_vo.stream_name, 'account': self.account_id } except Exception as e: resource_id = stream_name error_resource_response = self.generate_error(region_name, resource_id, e) yield {'data': error_resource_response} def get_tags(self, name): tag_response = self.client.list_tags_for_stream(StreamName=name) return tag_response.get("Tags", []) def get_consumers(self, arn): consumer_response = self.client.list_stream_consumers(StreamARN=arn) consumers_info = consumer_response.get("Consumers", []) consumers_num = len(consumers_info) consumers = [] for consumer_info in 
consumers_info: consumer_info.update( { "consumer_status_display": self.get_consumers_status_display( consumer_info.get("ConsumerStatus") ), } ) consumers.append(Consumers(consumer_info, strict=False)) return consumers_num, consumers @staticmethod def get_consumers_num(consumers): return len(consumers) @staticmethod def get_consumers_status_display(raw_status): return raw_status[0] + raw_status[1:].lower() @staticmethod def get_stream_status_display(raw_status): return raw_status[0] + raw_status[1:].lower() @staticmethod def get_retention_period_days(retention_period_hours): return int(retention_period_hours / 24) @staticmethod def get_retention_period_display(retention_period_hours): day = int(retention_period_hours / 24) hour = int(retention_period_hours % 24) day_postfix = f"{day} day" if day == 1 else ("" if not day else f"{day} days") hour_postfix = ( f" {hour} hour" if hour == 1 else ("" if not hour else f" {hour} hours") ) return day_postfix + hour_postfix @staticmethod def get_encryption_display(raw_encryption): return "Disabled" if raw_encryption == "NONE" else "Enabled" @staticmethod def get_shard_level_metrics_display(enhanced_monitoring): return ( ["Disabled"] if not enhanced_monitoring[0]["ShardLevelMetrics"] else enhanced_monitoring[0]["ShardLevelMetrics"] ) @staticmethod def get_open_shards_num(shards_list): return len( [ shard for shard in shards_list if shard.get("SequenceNumberRange", {}).get("EndingSequenceNumber") is None ] ) @staticmethod def get_closed_shards_num(shards_list): return len( [ shard for shard in shards_list if shard.get("SequenceNumberRange", {}).get("EndingSequenceNumber") is not None ] )
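The retention-period formatting above is pure arithmetic, so it can be sanity-checked without touching AWS. A condensed re-implementation (divmod in place of separate division and modulo) that should agree with get_retention_period_display for non-negative hour counts:

def retention_display(hours: int) -> str:
    # e.g. 25 hours -> "1 day 1 hour"; 24 -> "1 day"
    day, hour = divmod(hours, 24)
    day_part = f"{day} day" if day == 1 else ("" if not day else f"{day} days")
    hour_part = f" {hour} hour" if hour == 1 else ("" if not hour else f" {hour} hours")
    return day_part + hour_part

assert retention_display(24) == "1 day"
assert retention_display(25) == "1 day 1 hour"
assert retention_display(168) == "7 days"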
""" Copyright 2021 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from bzt import TaurusConfigError, ToolError from bzt.modules import ScenarioExecutor from bzt.modules.console import ExecutorWidget from bzt.modules.aggregator import ResultsReader, ConsolidatingAggregator from bzt.utils import RequiredTool, CALL_PROBLEMS, FileReader, shutdown_process class K6Executor(ScenarioExecutor): def __init__(self): super(K6Executor, self).__init__() self.output_file = None self.log_file = None self.script = None self.process = None self.k6 = None self.kpi_file = None def prepare(self): super(K6Executor, self).prepare() self.install_required_tools() self.script = self.get_script_path() if not self.script: raise TaurusConfigError("'script' should be present for k6 executor") self.stdout = open(self.engine.create_artifact("k6", ".out"), "w") self.stderr = open(self.engine.create_artifact("k6", ".err"), "w") self.kpi_file = self.engine.create_artifact("kpi", ".csv") self.reader = K6LogReader(self.kpi_file, self.log) if isinstance(self.engine.aggregator, ConsolidatingAggregator): self.engine.aggregator.add_underling(self.reader) def startup(self): cmdline = [self.k6.tool_name, "run", "--out", f"csv={self.kpi_file}"] load = self.get_load() if load.concurrency: cmdline += ['--vus', str(load.concurrency)] if load.hold: cmdline += ['--duration', str(int(load.hold)) + "s"] if load.iterations: iterations = load.iterations * load.concurrency if load.concurrency else load.iterations cmdline += ['--iterations', str(iterations)] user_cmd = self.settings.get("cmdline") if user_cmd: cmdline += user_cmd.split(" ") cmdline += [self.script] self.process = self._execute(cmdline) def get_widget(self): if not self.widget: label = "%s" % self self.widget = ExecutorWidget(self, "K6: " + label.split('/')[1]) return self.widget def check(self): retcode = self.process.poll() if retcode is not None: ToolError(f"K6 tool exited with non-zero code: {retcode}") return True return False def shutdown(self): shutdown_process(self.process, self.log) def post_process(self): if self.kpi_file: self.engine.existing_artifact(self.kpi_file) super(K6Executor, self).post_process() def install_required_tools(self): self.k6 = self._get_tool(K6, config=self.settings) self.k6.tool_name = self.k6.tool_name.lower() if not self.k6.check_if_installed(): self.k6.install() def resource_files(self): return [self.get_script_path(required=True)] class K6LogReader(ResultsReader): def __init__(self, filename, parent_logger): super(K6LogReader, self).__init__() self.log = parent_logger.getChild(self.__class__.__name__) self.file = FileReader(filename=filename, parent_logger=self.log) self.data = {'timestamp': [], 'label': [], 'r_code': [], 'error_msg': [], 'http_req_duration': [], 'http_req_connecting': [], 'http_req_tls_handshaking': [], 'http_req_waiting': [], 'vus': [], 'data_received': []} self.position = {'timestamp': None, 'metric_value': None, 'error': None, 'expected_response': None, 'name': None, 'status': None} def _read(self, last_pass=False): self.lines = 
list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass)) for line in self.lines: if line.startswith("metric_name"): parts = line[:-1].split(",") self.position['timestamp'] = parts.index('timestamp') self.position['metric_value'] = parts.index('metric_value') self.position['error'] = parts.index('error') self.position['expected_response'] = parts.index('expected_response') self.position['name'] = parts.index('name') self.position['status'] = parts.index('status') elif line.startswith("http_reqs"): self.data['timestamp'].append(int(line.split(',')[self.position['timestamp']])) self.data['label'].append(line.split(',')[self.position['name']]) self.data['r_code'].append(line.split(',')[self.position['status']]) error = line.split(',')[self.position['error']] if not error and line.split(',')[self.position['expected_response']] == 'false': error = f"Response code: {line.split(",")[self.position["status"]]}" self.data['error_msg'].append(error) elif line.startswith("http_req_duration"): self.data['http_req_duration'].append(float(line.split(',')[self.position['metric_value']])) elif line.startswith("http_req_connecting"): self.data['http_req_connecting'].append(float(line.split(',')[self.position['metric_value']])) elif line.startswith("http_req_tls_handshaking"): self.data['http_req_tls_handshaking'].append(float(line.split(',')[self.position['metric_value']])) elif line.startswith("http_req_waiting"): self.data['http_req_waiting'].append(float(line.split(',')[self.position['metric_value']])) elif line.startswith("vus") and not line.startswith("vus_max"): self.data['vus'].append(int(float(line.split(',')[self.position['metric_value']]))) elif line.startswith("data_received"): self.data['data_received'].append(float(line.split(',')[self.position['metric_value']])) if self.data['vus'] and len(self.data['data_received']) >= self.data['vus'][0] and \ len(self.data['http_req_waiting']) >= self.data['vus'][0]: for i in range(self.data['vus'][0]): kpi_set = ( self.data['timestamp'][0], self.data['label'][0], self.data['vus'][0], self.data['http_req_duration'][0] / 1000, (self.data['http_req_connecting'][0] + self.data['http_req_tls_handshaking'][0]) / 1000, self.data['http_req_waiting'][0] / 1000, self.data['r_code'][0], None if not self.data['error_msg'][0] else self.data['error_msg'][0], '', self.data['data_received'][0]) for key in self.data.keys(): if key != 'vus': self.data[key].pop(0) yield kpi_set self.data['vus'].pop(0) class K6(RequiredTool): def __init__(self, config=None, **kwargs): super(K6, self).__init__(installable=False, **kwargs) def check_if_installed(self): self.log.debug('Checking K6 Framework: %s' % self.tool_path) try: out, err = self.call(['k6', 'version']) except CALL_PROBLEMS as exc: self.log.warning("%s check failed: %s", self.tool_name, exc) return False if err: out += err self.log.debug("K6 output: %s", out) return True
""" Copyright 2021 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from bzt import TaurusConfigError, ToolError from bzt.modules import ScenarioExecutor from bzt.modules.console import ExecutorWidget from bzt.modules.aggregator import ResultsReader, ConsolidatingAggregator from bzt.utils import RequiredTool, CALL_PROBLEMS, FileReader, shutdown_process class K6Executor(ScenarioExecutor): def __init__(self): super(K6Executor, self).__init__() self.output_file = None self.log_file = None self.script = None self.process = None self.k6 = None self.kpi_file = None def prepare(self): super(K6Executor, self).prepare() self.install_required_tools() self.script = self.get_script_path() if not self.script: raise TaurusConfigError("'script' should be present for k6 executor") self.stdout = open(self.engine.create_artifact("k6", ".out"), "w") self.stderr = open(self.engine.create_artifact("k6", ".err"), "w") self.kpi_file = self.engine.create_artifact("kpi", ".csv") self.reader = K6LogReader(self.kpi_file, self.log) if isinstance(self.engine.aggregator, ConsolidatingAggregator): self.engine.aggregator.add_underling(self.reader) def startup(self): cmdline = [self.k6.tool_name, "run", "--out", f"csv={self.kpi_file}"] load = self.get_load() if load.concurrency: cmdline += ['--vus', str(load.concurrency)] if load.hold: cmdline += ['--duration', str(int(load.hold)) + "s"] if load.iterations: iterations = load.iterations * load.concurrency if load.concurrency else load.iterations cmdline += ['--iterations', str(iterations)] user_cmd = self.settings.get("cmdline") if user_cmd: cmdline += user_cmd.split(" ") cmdline += [self.script] self.process = self._execute(cmdline) def get_widget(self): if not self.widget: label = "%s" % self self.widget = ExecutorWidget(self, "K6: " + label.split('/')[1]) return self.widget def check(self): retcode = self.process.poll() if retcode is not None: ToolError(f"K6 tool exited with non-zero code: {retcode}") return True return False def shutdown(self): shutdown_process(self.process, self.log) def post_process(self): if self.kpi_file: self.engine.existing_artifact(self.kpi_file) super(K6Executor, self).post_process() def install_required_tools(self): self.k6 = self._get_tool(K6, config=self.settings) self.k6.tool_name = self.k6.tool_name.lower() if not self.k6.check_if_installed(): self.k6.install() def resource_files(self): return [self.get_script_path(required=True)] class K6LogReader(ResultsReader): def __init__(self, filename, parent_logger): super(K6LogReader, self).__init__() self.log = parent_logger.getChild(self.__class__.__name__) self.file = FileReader(filename=filename, parent_logger=self.log) self.data = {'timestamp': [], 'label': [], 'r_code': [], 'error_msg': [], 'http_req_duration': [], 'http_req_connecting': [], 'http_req_tls_handshaking': [], 'http_req_waiting': [], 'vus': [], 'data_received': []} self.position = {'timestamp': None, 'metric_value': None, 'error': None, 'expected_response': None, 'name': None, 'status': None} def _read(self, last_pass=False): self.lines = 
list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass)) for line in self.lines: if line.startswith("metric_name"): parts = line[:-1].split(",") self.position['timestamp'] = parts.index('timestamp') self.position['metric_value'] = parts.index('metric_value') self.position['error'] = parts.index('error') self.position['expected_response'] = parts.index('expected_response') self.position['name'] = parts.index('name') self.position['status'] = parts.index('status') elif line.startswith("http_reqs"): self.data['timestamp'].append(int(line.split(',')[self.position['timestamp']])) self.data['label'].append(line.split(',')[self.position['name']]) self.data['r_code'].append(line.split(',')[self.position['status']]) error = line.split(',')[self.position['error']] if not error and line.split(',')[self.position['expected_response']] == 'false': error = f"Response code: {line.split(',')[self.position['status']]}" self.data['error_msg'].append(error) elif line.startswith("http_req_duration"): self.data['http_req_duration'].append(float(line.split(',')[self.position['metric_value']])) elif line.startswith("http_req_connecting"): self.data['http_req_connecting'].append(float(line.split(',')[self.position['metric_value']])) elif line.startswith("http_req_tls_handshaking"): self.data['http_req_tls_handshaking'].append(float(line.split(',')[self.position['metric_value']])) elif line.startswith("http_req_waiting"): self.data['http_req_waiting'].append(float(line.split(',')[self.position['metric_value']])) elif line.startswith("vus") and not line.startswith("vus_max"): self.data['vus'].append(int(float(line.split(',')[self.position['metric_value']]))) elif line.startswith("data_received"): self.data['data_received'].append(float(line.split(',')[self.position['metric_value']])) if self.data['vus'] and len(self.data['data_received']) >= self.data['vus'][0] and \ len(self.data['http_req_waiting']) >= self.data['vus'][0]: for i in range(self.data['vus'][0]): kpi_set = ( self.data['timestamp'][0], self.data['label'][0], self.data['vus'][0], self.data['http_req_duration'][0] / 1000, (self.data['http_req_connecting'][0] + self.data['http_req_tls_handshaking'][0]) / 1000, self.data['http_req_waiting'][0] / 1000, self.data['r_code'][0], None if not self.data['error_msg'][0] else self.data['error_msg'][0], '', self.data['data_received'][0]) for key in self.data.keys(): if key != 'vus': self.data[key].pop(0) yield kpi_set self.data['vus'].pop(0) class K6(RequiredTool): def __init__(self, config=None, **kwargs): super(K6, self).__init__(installable=False, **kwargs) def check_if_installed(self): self.log.debug('Checking K6 Framework: %s' % self.tool_path) try: out, err = self.call(['k6', 'version']) except CALL_PROBLEMS as exc: self.log.warning("%s check failed: %s", self.tool_name, exc) return False if err: out += err self.log.debug("K6 output: %s", out) return True
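K6LogReader above locates CSV columns by header name rather than by fixed position. A self-contained sketch of just that indexing step, using an invented two-line sample (the exact k6 column set varies between versions; only the columns the reader looks up matter here):

sample = (
    "metric_name,timestamp,metric_value,check,error,error_code,"
    "expected_response,group,method,name,proto,scenario,service,status\n"
    "http_reqs,1614717401,1.000000,,,,true,,GET,http://example.com/,HTTP/1.1,default,,200\n"
)
header, row = [line.split(",") for line in sample.splitlines()]
# Column lookup mirrors the reader's `position` map.
position = {key: header.index(key) for key in
            ("timestamp", "metric_value", "error", "expected_response", "name", "status")}
assert row[position["status"]] == "200"
assert row[position["expected_response"]] == "true"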
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations import copy import inspect import json import re import typing from collections import defaultdict from dataclasses import dataclass from enum import Enum from pathlib import Path from typing import Any, DefaultDict, Iterable, Mapping import yaml from pants.base.build_environment import get_buildroot from pants.base.deprecated import validate_deprecation_semver, warn_or_error from pants.option.config import DEFAULT_SECTION, Config from pants.option.custom_types import ( DictValueComponent, ListValueComponent, UnsetBool, dir_option, file_option, shell_str, target_option, ) from pants.option.errors import ( BooleanConversionError, BooleanOptionNameWithNo, DefaultMemberValueType, DefaultValueType, FromfileError, HelpType, ImplicitValIsNone, InvalidKwarg, InvalidKwargNonGlobalScope, InvalidMemberType, MemberTypeNotAllowed, MutuallyExclusiveOptionError, NoOptionNames, OptionAlreadyRegistered, OptionNameDash, OptionNameDoubleDash, ParseError, PassthroughType, RegistrationError, UnknownFlagsError, ) from pants.option.option_util import is_dict_option, is_list_option from pants.option.option_value_container import OptionValueContainer, OptionValueContainerBuilder from pants.option.ranked_value import Rank, RankedValue from pants.option.scope import GLOBAL_SCOPE, GLOBAL_SCOPE_CONFIG_SECTION, ScopeInfo from pants.util.meta import frozen_after_init @dataclass(frozen=True) class OptionValueHistory: ranked_values: tuple[RankedValue] @property def final_value(self) -> RankedValue: return self.ranked_values[-1] class Parser: """An argument parser.""" @staticmethod def is_bool(kwargs: Mapping[str, Any]) -> bool: type_arg = kwargs.get("type") if type_arg is None: return False if type_arg is bool: return True try: return typing.get_type_hints(type_arg).get("return") is bool except TypeError: return False @staticmethod def ensure_bool(val: bool | str) -> bool: if isinstance(val, bool): return val if isinstance(val, str): s = val.lower() if s == "true": return True if s == "false": return False raise BooleanConversionError(f'Got "{val}". Expected "True" or "False".') raise BooleanConversionError(f"Got {val}. Expected True or False.") @classmethod def _invert(cls, s: bool | str | None) -> bool | None: if s is None: return None b = cls.ensure_bool(s) return not b @classmethod def scope_str(cls, scope: str) -> str: return "global scope" if scope == GLOBAL_SCOPE else f"scope '{scope}'" def __init__( self, env: Mapping[str, str], config: Config, scope_info: ScopeInfo, ) -> None: """Create a Parser instance. :param env: a dict of environment variables. :param config: data from a config file. :param scope_info: the scope this parser acts for. """ self._env = env self._config = config self._scope_info = scope_info self._scope = self._scope_info.scope # All option args registered with this parser. Used to prevent conflicts. self._known_args: set[str] = set() # List of (args, kwargs) registration pairs, exactly as captured at registration time. self._option_registrations: list[tuple[tuple[str, ...], dict[str, Any]]] = [] # Map of dest -> history. 
self._history: dict[str, OptionValueHistory] = {} @property def scope_info(self) -> ScopeInfo: return self._scope_info @property def scope(self) -> str: return self._scope def history(self, dest: str) -> OptionValueHistory | None: return self._history.get(dest) @frozen_after_init @dataclass(unsafe_hash=True) class ParseArgsRequest: flag_value_map: dict[str, list[Any]] namespace: OptionValueContainerBuilder passthrough_args: list[str] allow_unknown_flags: bool def __init__( self, flags_in_scope: Iterable[str], namespace: OptionValueContainerBuilder, passthrough_args: list[str], allow_unknown_flags: bool, ) -> None: """ :param flags_in_scope: Iterable of arg strings to parse into flag values. :param namespace: The object to register the flag values on """ self.flag_value_map = self._create_flag_value_map(flags_in_scope) self.namespace = namespace self.passthrough_args = passthrough_args self.allow_unknown_flags = allow_unknown_flags @staticmethod def _create_flag_value_map(flags: Iterable[str]) -> DefaultDict[str, list[str | None]]: """Returns a map of flag -> list of values, based on the given flag strings. None signals no value given (e.g., -x, --foo). The value is a list because the user may specify the same flag multiple times, and that's sometimes OK (e.g., when appending to list- valued options). """ flag_value_map: DefaultDict[str, list[str | None]] = defaultdict(list) for flag in flags: flag_val: str | None key, has_equals_sign, flag_val = flag.partition("=") if not has_equals_sign: if not flag.startswith("--"): # '-xfoo' style. key = flag[0:2] flag_val = flag[2:] if not flag_val: # Either a short option with no value or a long option with no equals sign. # Important so we can distinguish between no value ('--foo') and setting to an empty # string ('--foo='), for options with an implicit_value. flag_val = None flag_value_map[key].append(flag_val) return flag_value_map def parse_args(self, parse_args_request: ParseArgsRequest) -> OptionValueContainer: """Set values for this parser's options on the namespace object. :raises: :class:`ParseError` if any flags weren't recognized. """ flag_value_map = parse_args_request.flag_value_map namespace = parse_args_request.namespace mutex_map: DefaultDict[str, list[str]] = defaultdict(list) for args, kwargs in self._option_registrations: self._validate(args, kwargs) dest = self.parse_dest(*args, **kwargs) # Compute the values provided on the command line for this option. Note that there may be # multiple values, for any combination of the following reasons: # - The user used the same flag multiple times. # - The user specified a boolean flag (--foo) and its inverse (--no-foo). # - The option has multiple names, and the user used more than one of them. # # We also check if the option is deprecated, but we only do so if the option is explicitly # specified as a command-line flag, so we don't spam users with deprecated option values # specified in config, which isn't something they control. implicit_value = kwargs.get("implicit_value") if implicit_value is None and self.is_bool(kwargs): implicit_value = True # Allows --foo to mean --foo=true. 
flag_vals: list[int | float | bool | str] = [] def add_flag_val(v: int | float | bool | str | None) -> None: if v is None: if implicit_value is None: raise ParseError( f"Missing value for command line flag {arg} in {self._scope_str()}" ) flag_vals.append(implicit_value) else: flag_vals.append(v) for arg in args: # If the user specified --no-foo on the cmd line, treat it as if the user specified # --foo, but with the inverse value. if self.is_bool(kwargs): inverse_arg = self._inverse_arg(arg) if inverse_arg in flag_value_map: flag_value_map[arg] = [self._invert(v) for v in flag_value_map[inverse_arg]] implicit_value = self._invert(implicit_value) del flag_value_map[inverse_arg] if arg in flag_value_map: for v in flag_value_map[arg]: add_flag_val(v) del flag_value_map[arg] # Get the value for this option, falling back to defaults as needed. try: value_history = self._compute_value( dest, kwargs, flag_vals, parse_args_request.passthrough_args ) self._history[dest] = value_history val = value_history.final_value except ParseError as e: # Reraise a new exception with context on the option being processed at the time of error. # Note that other exception types can be raised here that are caught by ParseError (e.g. # BooleanConversionError), hence we reference the original exception type as type(e). args_str = ", ".join(args) raise type(e)( f"Error computing value for {args_str} in {self._scope_str()} (may also be " f"from PANTS_* environment variables).\nCaused by:\n{e}" ) # If the option is explicitly given, check deprecation and mutual exclusion. if val.rank > Rank.HARDCODED: self._check_deprecated(dest, kwargs) mutex_dest = kwargs.get("mutually_exclusive_group") mutex_map_key = mutex_dest or dest mutex_map[mutex_map_key].append(dest) if len(mutex_map[mutex_map_key]) > 1: raise MutuallyExclusiveOptionError( "Can only provide one of these mutually exclusive options in " f"{self._scope_str()}, but multiple given: " f"{", ".join(mutex_map[mutex_map_key])}" ) setattr(namespace, dest, val) if not parse_args_request.allow_unknown_flags and flag_value_map: # There were unconsumed flags. raise UnknownFlagsError(tuple(flag_value_map.keys()), self.scope) return namespace.build() def option_registrations_iter(self): """Returns an iterator over the normalized registration arguments of each option in this parser. Useful for generating help and other documentation. Each yielded item is an (args, kwargs) pair, as passed to register(), except that kwargs will be normalized in the following ways: - It will always have 'dest' explicitly set. - It will always have 'default' explicitly set, and the value will be a RankedValue. """ def normalize_kwargs(orig_args, orig_kwargs): nkwargs = copy.copy(orig_kwargs) dest = self.parse_dest(*orig_args, **nkwargs) nkwargs["dest"] = dest if not ("default" in nkwargs and isinstance(nkwargs["default"], RankedValue)): type_arg = nkwargs.get("type", str) member_type = nkwargs.get("member_type", str) default_val = self.to_value_type( nkwargs.get("default"), type_arg, member_type, dest ) if isinstance(default_val, (ListValueComponent, DictValueComponent)): default_val = default_val.val nkwargs["default"] = RankedValue(Rank.HARDCODED, default_val) return nkwargs # Yield our directly-registered options. 
for args, kwargs in self._option_registrations: normalized_kwargs = normalize_kwargs(args, kwargs) yield args, normalized_kwargs def register(self, *args, **kwargs) -> None: """Register an option.""" if args: dest = self.parse_dest(*args, **kwargs) self._check_deprecated(dest, kwargs, print_warning=False) if self.is_bool(kwargs): default = kwargs.get("default") if default is None: # Unless a tri-state bool is explicitly opted into with the `UnsetBool` default value, # boolean options always have an implicit boolean-typed default. We make that default # explicit here. kwargs["default"] = not self.ensure_bool(kwargs.get("implicit_value", True)) elif default is UnsetBool: kwargs["default"] = None # Record the args. We'll do the underlying parsing on-demand. self._option_registrations.append((args, kwargs)) # Look for direct conflicts. for arg in args: if arg in self._known_args: raise OptionAlreadyRegistered(self.scope, arg) self._known_args.update(args) def _check_deprecated(self, dest: str, kwargs, print_warning: bool = True) -> None: """Checks option for deprecation and issues a warning/error if necessary.""" removal_version = kwargs.get("removal_version", None) if removal_version is not None: warn_or_error( removal_version=removal_version, entity=f"option '{dest}' in {self._scope_str()}", start_version=kwargs.get("deprecation_start_version", None), hint=kwargs.get("removal_hint", None), print_warning=print_warning, ) _allowed_registration_kwargs = { "type", "member_type", "choices", "dest", "default", "default_help_repr", "implicit_value", "metavar", "help", "advanced", "fingerprint", "removal_version", "removal_hint", "deprecation_start_version", "fromfile", "mutually_exclusive_group", "daemon", "passthrough", } _allowed_member_types = { str, int, float, dict, dir_option, file_option, target_option, shell_str, } def _validate(self, args, kwargs) -> None: """Validate option registration arguments.""" def error( exception_type: type[RegistrationError], arg_name: str | None = None, **msg_kwargs, ) -> None: if arg_name is None: arg_name = args[0] if args else "<unknown>" raise exception_type(self.scope, arg_name, **msg_kwargs) if not args: error(NoOptionNames) # validate args. for arg in args: if not arg.startswith("-"): error(OptionNameDash, arg_name=arg) if not arg.startswith("--") and len(arg) > 2: error(OptionNameDoubleDash, arg_name=arg) # Validate kwargs. if "implicit_value" in kwargs and kwargs["implicit_value"] is None: error(ImplicitValIsNone) type_arg = kwargs.get("type", str) if "member_type" in kwargs and type_arg != list: error(MemberTypeNotAllowed, type_=type_arg.__name__) member_type = kwargs.get("member_type", str) is_enum = inspect.isclass(member_type) and issubclass(member_type, Enum) if not is_enum and member_type not in self._allowed_member_types: error(InvalidMemberType, member_type=member_type.__name__) help_arg = kwargs.get("help") if help_arg is not None and not isinstance(help_arg, str): error(HelpType, help_type=type(help_arg).__name__) # check type of default value default_value = kwargs.get("default") if default_value is not None: if isinstance(default_value, str) and type_arg != str: # attempt to parse default value, for correctness.. 
# custom function types may implement their own validation default_value = self.to_value_type(default_value, type_arg, member_type, "") if hasattr(default_value, "val"): default_value = default_value.val # fall through to type check, to verify that custom types returned a value of correct type if isinstance(type_arg, type) and not isinstance(default_value, type_arg): error( DefaultValueType, option_type=type_arg.__name__, default_value=kwargs["default"], value_type=type(default_value).__name__, ) # verify list member types (this is not done by the custom list value type) if type_arg == list: for member_val in default_value: if not isinstance(member_type, type): # defer value validation to custom type member_type(member_val) elif not isinstance(member_val, member_type): error( DefaultMemberValueType, member_type=member_type.__name__, member_value=member_val, value_type=type(member_val).__name__, ) if ( "passthrough" in kwargs and kwargs["passthrough"] and (type_arg != list or member_type not in (shell_str, str)) ): error(PassthroughType) for kwarg in kwargs: if kwarg not in self._allowed_registration_kwargs: error(InvalidKwarg, kwarg=kwarg) # Ensure `daemon=True` can't be passed on non-global scopes. if kwarg == "daemon" and self._scope != GLOBAL_SCOPE: error(InvalidKwargNonGlobalScope, kwarg=kwarg) removal_version = kwargs.get("removal_version") if removal_version is not None: validate_deprecation_semver(removal_version, "removal version") _ENV_SANITIZER_RE = re.compile(r"[.-]") @staticmethod def parse_dest(*args, **kwargs): """Return the dest for an option registration. If an explicit `dest` is specified, returns that and otherwise derives a default from the option flags where '--foo-bar' -> 'foo_bar' and '-x' -> 'x'. The dest is used for: - The name of the field containing the option value. - The key in the config file. - Computing the name of the env var used to set the option name. """ dest = kwargs.get("dest") if dest: return dest # No explicit dest, so compute one based on the first long arg, or the short arg # if that's all there is. arg = next((a for a in args if a.startswith("--")), args[0]) return arg.lstrip("-").replace("-", "_") @staticmethod def _convert_member_type(member_type, value): if member_type == dict: return DictValueComponent.create(value).val try: return member_type(value) except ValueError as error: raise ParseError(str(error)) def to_value_type(self, val_str, type_arg, member_type, dest): """Convert a string to a value of the option's type.""" if val_str is None: return None if type_arg == bool: return self.ensure_bool(val_str) try: if type_arg == list: return ListValueComponent.create(val_str, member_type=member_type) if type_arg == dict: return DictValueComponent.create(val_str) return type_arg(val_str) except (TypeError, ValueError) as e: if issubclass(type_arg, Enum): choices = ", ".join(f"{choice.value}" for choice in type_arg) raise ParseError(f"Invalid choice '{val_str}'. Choose from: {choices}") raise ParseError( f"Error applying type '{type_arg.__name__}' to option value '{val_str}': {e}" ) @classmethod def get_env_var_names(cls, scope: str, dest: str): # Get value from environment, and capture details about its derivation. udest = dest.upper() if scope == GLOBAL_SCOPE: # For convenience, we allow three forms of env var for global scope options. # The fully-specified env var is PANTS_GLOBAL_FOO, which is uniform with PANTS_<SCOPE>_FOO # for all the other scopes. However we also allow simply PANTS_FOO. 
And if the option name # itself starts with 'pants-' then we also allow simply FOO. E.g., PANTS_WORKDIR instead of # PANTS_PANTS_WORKDIR or PANTS_GLOBAL_PANTS_WORKDIR. We take the first specified value we # find, in this order: PANTS_GLOBAL_FOO, PANTS_FOO, FOO. env_vars = [f"PANTS_GLOBAL_{udest}", f"PANTS_{udest}"] if udest.startswith("PANTS_"): env_vars.append(udest) else: sanitized_env_var_scope = cls._ENV_SANITIZER_RE.sub("_", scope.upper()) env_vars = [f"PANTS_{sanitized_env_var_scope}_{udest}"] return env_vars def _compute_value(self, dest, kwargs, flag_val_strs, passthru_arg_strs): """Compute the value to use for an option. The source of the value is chosen according to the ranking in Rank. """ type_arg = kwargs.get("type", str) member_type = kwargs.get("member_type", str) def to_value_type(val_str): return self.to_value_type(val_str, type_arg, member_type, dest) # Helper function to expand a fromfile=True value string, if needed. # May return a string or a dict/list decoded from a json/yaml file. def expand(val_or_str): if ( kwargs.get("fromfile", True) and isinstance(val_or_str, str) and val_or_str.startswith("@") ): if val_or_str.startswith("@@"): # Support a literal @ for fromfile values via @@. return val_or_str[1:] else: fromfile = val_or_str[1:] try: with open(fromfile) as fp: s = fp.read().strip() if fromfile.endswith(".json"): return json.loads(s) elif fromfile.endswith(".yml") or fromfile.endswith(".yaml"): return yaml.safe_load(s) else: return s except (OSError, ValueError, yaml.YAMLError) as e: raise FromfileError( f"Failed to read {dest} in {self._scope_str()} from file {fromfile}: {e!r}" ) else: return val_or_str # Helper function to merge multiple values from a single rank (e.g., multiple flags, # or multiple config files). def merge_in_rank(vals): if not vals: return None expanded_vals = [to_value_type(expand(x)) for x in vals] if is_list_option(kwargs): return ListValueComponent.merge(expanded_vals) if is_dict_option(kwargs): return DictValueComponent.merge(expanded_vals) return expanded_vals[-1] # Last value wins. # Get value from config files, and capture details about its derivation. config_details = None config_section = GLOBAL_SCOPE_CONFIG_SECTION if self._scope == GLOBAL_SCOPE else self._scope config_default_val = merge_in_rank(self._config.get(DEFAULT_SECTION, dest)) config_val = merge_in_rank(self._config.get(config_section, dest)) config_source_files = self._config.get_sources_for_option(config_section, dest) if config_source_files: config_details = f"from {", ".join(config_source_files)}" # Get value from environment, and capture details about its derivation. env_vars = self.get_env_var_names(self._scope, dest) env_val = None env_details = None if self._env: for env_var in env_vars: if env_var in self._env: env_val = merge_in_rank([self._env.get(env_var)]) env_details = f"from env var {env_var}" break # Get value from cmd-line flags. flag_vals = list(flag_val_strs) if kwargs.get("passthrough"): # NB: Passthrough arguments are either of type `str` or `shell_str` # (see self._validate): the former never need interpretation, and the latter do not # need interpretation when they have been provided directly via `sys.argv` as the # passthrough args have been. 
flag_vals.append( ListValueComponent(ListValueComponent.MODIFY, [*passthru_arg_strs], []) ) if len(flag_vals) > 1 and not (is_list_option(kwargs) or is_dict_option(kwargs)): raise ParseError( f"Multiple cmd line flags specified for option {dest} in {self._scope_str()}" ) flag_val = merge_in_rank(flag_vals) flag_details = None if flag_val is None else "from command-line flag" # Rank all available values. values_to_rank = [ (flag_val, flag_details), (env_val, env_details), (config_val, config_details), (config_default_val, config_details), (to_value_type(kwargs.get("default")), None), (None, None), ] # Note that ranked_vals will always have at least one element, and all elements will be # instances of RankedValue (so none will be None, although they may wrap a None value). ranked_vals = list(reversed(list(RankedValue.prioritized_iter(*values_to_rank)))) def group(value_component_type, process_val_func) -> list[RankedValue]: # We group any values that are merged together, so that the history can reflect # merges vs. replacements in a useful way. E.g., if we merge [a, b] and [c], # and then replace it with [d, e], the history will contain: # - [d, e] (from command-line flag) # - [a, b, c] (from env var, from config) # And similarly for dicts. grouped: list[list[RankedValue]] = [[]] for ranked_val in ranked_vals: if ranked_val.value and ranked_val.value.action == value_component_type.REPLACE: grouped.append([]) grouped[-1].append(ranked_val) return [ RankedValue( grp[-1].rank, process_val_func( value_component_type.merge( rv.value for rv in grp if rv.value is not None ).val ), ", ".join(rv.details for rv in grp if rv.details), ) for grp in grouped if grp ] if is_list_option(kwargs): def process_list(lst): return [self._convert_member_type(member_type, val) for val in lst] historic_ranked_vals = group(ListValueComponent, process_list) elif is_dict_option(kwargs): historic_ranked_vals = group(DictValueComponent, lambda x: x) else: historic_ranked_vals = ranked_vals value_history = OptionValueHistory(tuple(historic_ranked_vals)) # Helper function to check various validity constraints on final option values. def check_scalar_value(val): if val is None: return choices = kwargs.get("choices") if choices is None and "type" in kwargs: if inspect.isclass(type_arg) and issubclass(type_arg, Enum): choices = list(type_arg) if choices is not None and val not in choices: raise ParseError( f"`{val}` is not an allowed value for option {dest} in {self._scope_str()}. " f"Must be one of: {choices}" ) elif type_arg == file_option: check_file_exists(val) elif type_arg == dir_option: check_dir_exists(val) def check_file_exists(val) -> None: error_prefix = f"File value `{val}` for option `{dest}` in `{self._scope_str()}`" try: path = Path(val) path_with_buildroot = Path(get_buildroot(), val) except TypeError: raise ParseError(f"{error_prefix} cannot be parsed as a file path.") if not path.is_file() and not path_with_buildroot.is_file(): raise ParseError(f"{error_prefix} does not exist.") def check_dir_exists(val) -> None: error_prefix = f"Directory value `{val}` for option `{dest}` in `{self._scope_str()}`" try: path = Path(val) path_with_buildroot = Path(get_buildroot(), val) except TypeError: raise ParseError(f"{error_prefix} cannot be parsed as a directory path.") if not path.is_dir() and not path_with_buildroot.is_dir(): raise ParseError(f"{error_prefix} does not exist.") # Validate the final value. 
final_val = value_history.final_value if isinstance(final_val.value, list): for component in final_val.value: check_scalar_value(component) if inspect.isclass(member_type) and issubclass(member_type, Enum): if len(final_val.value) != len(set(final_val.value)): raise ParseError(f"Duplicate enum values specified in list: {final_val.value}") elif isinstance(final_val.value, dict): for component in final_val.value.values(): check_scalar_value(component) else: check_scalar_value(final_val.value) return value_history def _inverse_arg(self, arg: str) -> str | None: if not arg.startswith("--"): return None if arg.startswith("--no-"): raise BooleanOptionNameWithNo(self.scope, arg) return f"--no-{arg[2:]}" def _scope_str(self, scope: str | None = None) -> str: return self.scope_str(scope if scope is not None else self.scope) def __str__(self) -> str: return f"Parser({self._scope})"
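# A minimal, self-contained sketch of the flag-splitting convention that
# ParseArgsRequest._create_flag_value_map implements above: '--foo=bar' splits on the
# first '=', '-xval' treats everything after the first two characters as the value,
# and a bare '--foo' maps to None so an implicit_value can apply later. The helper
# name split_flags is hypothetical; this is an illustration, not the Pants API.
from collections import defaultdict


def split_flags(flags):
    flag_values = defaultdict(list)
    for flag in flags:
        key, has_equals, value = flag.partition('=')
        if not has_equals:
            if not flag.startswith('--'):
                key, value = flag[:2], flag[2:]  # '-xfoo' style
            if not value:
                value = None  # distinguish '--foo' (no value) from '--foo=' (empty)
        flag_values[key].append(value)
    return dict(flag_values)


assert split_flags(['--foo=bar', '--foo', '--foo=', '-x1']) == {
    '--foo': ['bar', None, ''],
    '-x': ['1'],
}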
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations import copy import inspect import json import re import typing from collections import defaultdict from dataclasses import dataclass from enum import Enum from pathlib import Path from typing import Any, DefaultDict, Iterable, Mapping import yaml from pants.base.build_environment import get_buildroot from pants.base.deprecated import validate_deprecation_semver, warn_or_error from pants.option.config import DEFAULT_SECTION, Config from pants.option.custom_types import ( DictValueComponent, ListValueComponent, UnsetBool, dir_option, file_option, shell_str, target_option, ) from pants.option.errors import ( BooleanConversionError, BooleanOptionNameWithNo, DefaultMemberValueType, DefaultValueType, FromfileError, HelpType, ImplicitValIsNone, InvalidKwarg, InvalidKwargNonGlobalScope, InvalidMemberType, MemberTypeNotAllowed, MutuallyExclusiveOptionError, NoOptionNames, OptionAlreadyRegistered, OptionNameDash, OptionNameDoubleDash, ParseError, PassthroughType, RegistrationError, UnknownFlagsError, ) from pants.option.option_util import is_dict_option, is_list_option from pants.option.option_value_container import OptionValueContainer, OptionValueContainerBuilder from pants.option.ranked_value import Rank, RankedValue from pants.option.scope import GLOBAL_SCOPE, GLOBAL_SCOPE_CONFIG_SECTION, ScopeInfo from pants.util.meta import frozen_after_init @dataclass(frozen=True) class OptionValueHistory: ranked_values: tuple[RankedValue] @property def final_value(self) -> RankedValue: return self.ranked_values[-1] class Parser: """An argument parser.""" @staticmethod def is_bool(kwargs: Mapping[str, Any]) -> bool: type_arg = kwargs.get("type") if type_arg is None: return False if type_arg is bool: return True try: return typing.get_type_hints(type_arg).get("return") is bool except TypeError: return False @staticmethod def ensure_bool(val: bool | str) -> bool: if isinstance(val, bool): return val if isinstance(val, str): s = val.lower() if s == "true": return True if s == "false": return False raise BooleanConversionError(f'Got "{val}". Expected "True" or "False".') raise BooleanConversionError(f"Got {val}. Expected True or False.") @classmethod def _invert(cls, s: bool | str | None) -> bool | None: if s is None: return None b = cls.ensure_bool(s) return not b @classmethod def scope_str(cls, scope: str) -> str: return "global scope" if scope == GLOBAL_SCOPE else f"scope '{scope}'" def __init__( self, env: Mapping[str, str], config: Config, scope_info: ScopeInfo, ) -> None: """Create a Parser instance. :param env: a dict of environment variables. :param config: data from a config file. :param scope_info: the scope this parser acts for. """ self._env = env self._config = config self._scope_info = scope_info self._scope = self._scope_info.scope # All option args registered with this parser. Used to prevent conflicts. self._known_args: set[str] = set() # List of (args, kwargs) registration pairs, exactly as captured at registration time. self._option_registrations: list[tuple[tuple[str, ...], dict[str, Any]]] = [] # Map of dest -> history. 
self._history: dict[str, OptionValueHistory] = {} @property def scope_info(self) -> ScopeInfo: return self._scope_info @property def scope(self) -> str: return self._scope def history(self, dest: str) -> OptionValueHistory | None: return self._history.get(dest) @frozen_after_init @dataclass(unsafe_hash=True) class ParseArgsRequest: flag_value_map: dict[str, list[Any]] namespace: OptionValueContainerBuilder passthrough_args: list[str] allow_unknown_flags: bool def __init__( self, flags_in_scope: Iterable[str], namespace: OptionValueContainerBuilder, passthrough_args: list[str], allow_unknown_flags: bool, ) -> None: """ :param flags_in_scope: Iterable of arg strings to parse into flag values. :param namespace: The object to register the flag values on """ self.flag_value_map = self._create_flag_value_map(flags_in_scope) self.namespace = namespace self.passthrough_args = passthrough_args self.allow_unknown_flags = allow_unknown_flags @staticmethod def _create_flag_value_map(flags: Iterable[str]) -> DefaultDict[str, list[str | None]]: """Returns a map of flag -> list of values, based on the given flag strings. None signals no value given (e.g., -x, --foo). The value is a list because the user may specify the same flag multiple times, and that's sometimes OK (e.g., when appending to list- valued options). """ flag_value_map: DefaultDict[str, list[str | None]] = defaultdict(list) for flag in flags: flag_val: str | None key, has_equals_sign, flag_val = flag.partition("=") if not has_equals_sign: if not flag.startswith("--"): # '-xfoo' style. key = flag[0:2] flag_val = flag[2:] if not flag_val: # Either a short option with no value or a long option with no equals sign. # Important so we can distinguish between no value ('--foo') and setting to an empty # string ('--foo='), for options with an implicit_value. flag_val = None flag_value_map[key].append(flag_val) return flag_value_map def parse_args(self, parse_args_request: ParseArgsRequest) -> OptionValueContainer: """Set values for this parser's options on the namespace object. :raises: :class:`ParseError` if any flags weren't recognized. """ flag_value_map = parse_args_request.flag_value_map namespace = parse_args_request.namespace mutex_map: DefaultDict[str, list[str]] = defaultdict(list) for args, kwargs in self._option_registrations: self._validate(args, kwargs) dest = self.parse_dest(*args, **kwargs) # Compute the values provided on the command line for this option. Note that there may be # multiple values, for any combination of the following reasons: # - The user used the same flag multiple times. # - The user specified a boolean flag (--foo) and its inverse (--no-foo). # - The option has multiple names, and the user used more than one of them. # # We also check if the option is deprecated, but we only do so if the option is explicitly # specified as a command-line flag, so we don't spam users with deprecated option values # specified in config, which isn't something they control. implicit_value = kwargs.get("implicit_value") if implicit_value is None and self.is_bool(kwargs): implicit_value = True # Allows --foo to mean --foo=true. 
flag_vals: list[int | float | bool | str] = [] def add_flag_val(v: int | float | bool | str | None) -> None: if v is None: if implicit_value is None: raise ParseError( f"Missing value for command line flag {arg} in {self._scope_str()}" ) flag_vals.append(implicit_value) else: flag_vals.append(v) for arg in args: # If the user specified --no-foo on the cmd line, treat it as if the user specified # --foo, but with the inverse value. if self.is_bool(kwargs): inverse_arg = self._inverse_arg(arg) if inverse_arg in flag_value_map: flag_value_map[arg] = [self._invert(v) for v in flag_value_map[inverse_arg]] implicit_value = self._invert(implicit_value) del flag_value_map[inverse_arg] if arg in flag_value_map: for v in flag_value_map[arg]: add_flag_val(v) del flag_value_map[arg] # Get the value for this option, falling back to defaults as needed. try: value_history = self._compute_value( dest, kwargs, flag_vals, parse_args_request.passthrough_args ) self._history[dest] = value_history val = value_history.final_value except ParseError as e: # Reraise a new exception with context on the option being processed at the time of error. # Note that other exception types can be raised here that are caught by ParseError (e.g. # BooleanConversionError), hence we reference the original exception type as type(e). args_str = ", ".join(args) raise type(e)( f"Error computing value for {args_str} in {self._scope_str()} (may also be " f"from PANTS_* environment variables).\nCaused by:\n{e}" ) # If the option is explicitly given, check deprecation and mutual exclusion. if val.rank > Rank.HARDCODED: self._check_deprecated(dest, kwargs) mutex_dest = kwargs.get("mutually_exclusive_group") mutex_map_key = mutex_dest or dest mutex_map[mutex_map_key].append(dest) if len(mutex_map[mutex_map_key]) > 1: raise MutuallyExclusiveOptionError( "Can only provide one of these mutually exclusive options in " f"{self._scope_str()}, but multiple given: " f"{', '.join(mutex_map[mutex_map_key])}" ) setattr(namespace, dest, val) if not parse_args_request.allow_unknown_flags and flag_value_map: # There were unconsumed flags. raise UnknownFlagsError(tuple(flag_value_map.keys()), self.scope) return namespace.build() def option_registrations_iter(self): """Returns an iterator over the normalized registration arguments of each option in this parser. Useful for generating help and other documentation. Each yielded item is an (args, kwargs) pair, as passed to register(), except that kwargs will be normalized in the following ways: - It will always have 'dest' explicitly set. - It will always have 'default' explicitly set, and the value will be a RankedValue. """ def normalize_kwargs(orig_args, orig_kwargs): nkwargs = copy.copy(orig_kwargs) dest = self.parse_dest(*orig_args, **nkwargs) nkwargs["dest"] = dest if not ("default" in nkwargs and isinstance(nkwargs["default"], RankedValue)): type_arg = nkwargs.get("type", str) member_type = nkwargs.get("member_type", str) default_val = self.to_value_type( nkwargs.get("default"), type_arg, member_type, dest ) if isinstance(default_val, (ListValueComponent, DictValueComponent)): default_val = default_val.val nkwargs["default"] = RankedValue(Rank.HARDCODED, default_val) return nkwargs # Yield our directly-registered options. 
for args, kwargs in self._option_registrations: normalized_kwargs = normalize_kwargs(args, kwargs) yield args, normalized_kwargs def register(self, *args, **kwargs) -> None: """Register an option.""" if args: dest = self.parse_dest(*args, **kwargs) self._check_deprecated(dest, kwargs, print_warning=False) if self.is_bool(kwargs): default = kwargs.get("default") if default is None: # Unless a tri-state bool is explicitly opted into with the `UnsetBool` default value, # boolean options always have an implicit boolean-typed default. We make that default # explicit here. kwargs["default"] = not self.ensure_bool(kwargs.get("implicit_value", True)) elif default is UnsetBool: kwargs["default"] = None # Record the args. We'll do the underlying parsing on-demand. self._option_registrations.append((args, kwargs)) # Look for direct conflicts. for arg in args: if arg in self._known_args: raise OptionAlreadyRegistered(self.scope, arg) self._known_args.update(args) def _check_deprecated(self, dest: str, kwargs, print_warning: bool = True) -> None: """Checks option for deprecation and issues a warning/error if necessary.""" removal_version = kwargs.get("removal_version", None) if removal_version is not None: warn_or_error( removal_version=removal_version, entity=f"option '{dest}' in {self._scope_str()}", start_version=kwargs.get("deprecation_start_version", None), hint=kwargs.get("removal_hint", None), print_warning=print_warning, ) _allowed_registration_kwargs = { "type", "member_type", "choices", "dest", "default", "default_help_repr", "implicit_value", "metavar", "help", "advanced", "fingerprint", "removal_version", "removal_hint", "deprecation_start_version", "fromfile", "mutually_exclusive_group", "daemon", "passthrough", } _allowed_member_types = { str, int, float, dict, dir_option, file_option, target_option, shell_str, } def _validate(self, args, kwargs) -> None: """Validate option registration arguments.""" def error( exception_type: type[RegistrationError], arg_name: str | None = None, **msg_kwargs, ) -> None: if arg_name is None: arg_name = args[0] if args else "<unknown>" raise exception_type(self.scope, arg_name, **msg_kwargs) if not args: error(NoOptionNames) # validate args. for arg in args: if not arg.startswith("-"): error(OptionNameDash, arg_name=arg) if not arg.startswith("--") and len(arg) > 2: error(OptionNameDoubleDash, arg_name=arg) # Validate kwargs. if "implicit_value" in kwargs and kwargs["implicit_value"] is None: error(ImplicitValIsNone) type_arg = kwargs.get("type", str) if "member_type" in kwargs and type_arg != list: error(MemberTypeNotAllowed, type_=type_arg.__name__) member_type = kwargs.get("member_type", str) is_enum = inspect.isclass(member_type) and issubclass(member_type, Enum) if not is_enum and member_type not in self._allowed_member_types: error(InvalidMemberType, member_type=member_type.__name__) help_arg = kwargs.get("help") if help_arg is not None and not isinstance(help_arg, str): error(HelpType, help_type=type(help_arg).__name__) # check type of default value default_value = kwargs.get("default") if default_value is not None: if isinstance(default_value, str) and type_arg != str: # attempt to parse default value, for correctness.. 
# custom function types may implement their own validation default_value = self.to_value_type(default_value, type_arg, member_type, "") if hasattr(default_value, "val"): default_value = default_value.val # fall through to type check, to verify that custom types returned a value of correct type if isinstance(type_arg, type) and not isinstance(default_value, type_arg): error( DefaultValueType, option_type=type_arg.__name__, default_value=kwargs["default"], value_type=type(default_value).__name__, ) # verify list member types (this is not done by the custom list value type) if type_arg == list: for member_val in default_value: if not isinstance(member_type, type): # defer value validation to custom type member_type(member_val) elif not isinstance(member_val, member_type): error( DefaultMemberValueType, member_type=member_type.__name__, member_value=member_val, value_type=type(member_val).__name__, ) if ( "passthrough" in kwargs and kwargs["passthrough"] and (type_arg != list or member_type not in (shell_str, str)) ): error(PassthroughType) for kwarg in kwargs: if kwarg not in self._allowed_registration_kwargs: error(InvalidKwarg, kwarg=kwarg) # Ensure `daemon=True` can't be passed on non-global scopes. if kwarg == "daemon" and self._scope != GLOBAL_SCOPE: error(InvalidKwargNonGlobalScope, kwarg=kwarg) removal_version = kwargs.get("removal_version") if removal_version is not None: validate_deprecation_semver(removal_version, "removal version") _ENV_SANITIZER_RE = re.compile(r"[.-]") @staticmethod def parse_dest(*args, **kwargs): """Return the dest for an option registration. If an explicit `dest` is specified, returns that and otherwise derives a default from the option flags where '--foo-bar' -> 'foo_bar' and '-x' -> 'x'. The dest is used for: - The name of the field containing the option value. - The key in the config file. - Computing the name of the env var used to set the option name. """ dest = kwargs.get("dest") if dest: return dest # No explicit dest, so compute one based on the first long arg, or the short arg # if that's all there is. arg = next((a for a in args if a.startswith("--")), args[0]) return arg.lstrip("-").replace("-", "_") @staticmethod def _convert_member_type(member_type, value): if member_type == dict: return DictValueComponent.create(value).val try: return member_type(value) except ValueError as error: raise ParseError(str(error)) def to_value_type(self, val_str, type_arg, member_type, dest): """Convert a string to a value of the option's type.""" if val_str is None: return None if type_arg == bool: return self.ensure_bool(val_str) try: if type_arg == list: return ListValueComponent.create(val_str, member_type=member_type) if type_arg == dict: return DictValueComponent.create(val_str) return type_arg(val_str) except (TypeError, ValueError) as e: if issubclass(type_arg, Enum): choices = ", ".join(f"{choice.value}" for choice in type_arg) raise ParseError(f"Invalid choice '{val_str}'. Choose from: {choices}") raise ParseError( f"Error applying type '{type_arg.__name__}' to option value '{val_str}': {e}" ) @classmethod def get_env_var_names(cls, scope: str, dest: str): # Get value from environment, and capture details about its derivation. udest = dest.upper() if scope == GLOBAL_SCOPE: # For convenience, we allow three forms of env var for global scope options. # The fully-specified env var is PANTS_GLOBAL_FOO, which is uniform with PANTS_<SCOPE>_FOO # for all the other scopes. However we also allow simply PANTS_FOO. 
And if the option name # itself starts with 'pants-' then we also allow simply FOO. E.g., PANTS_WORKDIR instead of # PANTS_PANTS_WORKDIR or PANTS_GLOBAL_PANTS_WORKDIR. We take the first specified value we # find, in this order: PANTS_GLOBAL_FOO, PANTS_FOO, FOO. env_vars = [f"PANTS_GLOBAL_{udest}", f"PANTS_{udest}"] if udest.startswith("PANTS_"): env_vars.append(udest) else: sanitized_env_var_scope = cls._ENV_SANITIZER_RE.sub("_", scope.upper()) env_vars = [f"PANTS_{sanitized_env_var_scope}_{udest}"] return env_vars def _compute_value(self, dest, kwargs, flag_val_strs, passthru_arg_strs): """Compute the value to use for an option. The source of the value is chosen according to the ranking in Rank. """ type_arg = kwargs.get("type", str) member_type = kwargs.get("member_type", str) def to_value_type(val_str): return self.to_value_type(val_str, type_arg, member_type, dest) # Helper function to expand a fromfile=True value string, if needed. # May return a string or a dict/list decoded from a json/yaml file. def expand(val_or_str): if ( kwargs.get("fromfile", True) and isinstance(val_or_str, str) and val_or_str.startswith("@") ): if val_or_str.startswith("@@"): # Support a literal @ for fromfile values via @@. return val_or_str[1:] else: fromfile = val_or_str[1:] try: with open(fromfile) as fp: s = fp.read().strip() if fromfile.endswith(".json"): return json.loads(s) elif fromfile.endswith(".yml") or fromfile.endswith(".yaml"): return yaml.safe_load(s) else: return s except (OSError, ValueError, yaml.YAMLError) as e: raise FromfileError( f"Failed to read {dest} in {self._scope_str()} from file {fromfile}: {e!r}" ) else: return val_or_str # Helper function to merge multiple values from a single rank (e.g., multiple flags, # or multiple config files). def merge_in_rank(vals): if not vals: return None expanded_vals = [to_value_type(expand(x)) for x in vals] if is_list_option(kwargs): return ListValueComponent.merge(expanded_vals) if is_dict_option(kwargs): return DictValueComponent.merge(expanded_vals) return expanded_vals[-1] # Last value wins. # Get value from config files, and capture details about its derivation. config_details = None config_section = GLOBAL_SCOPE_CONFIG_SECTION if self._scope == GLOBAL_SCOPE else self._scope config_default_val = merge_in_rank(self._config.get(DEFAULT_SECTION, dest)) config_val = merge_in_rank(self._config.get(config_section, dest)) config_source_files = self._config.get_sources_for_option(config_section, dest) if config_source_files: config_details = f"from {', '.join(config_source_files)}" # Get value from environment, and capture details about its derivation. env_vars = self.get_env_var_names(self._scope, dest) env_val = None env_details = None if self._env: for env_var in env_vars: if env_var in self._env: env_val = merge_in_rank([self._env.get(env_var)]) env_details = f"from env var {env_var}" break # Get value from cmd-line flags. flag_vals = list(flag_val_strs) if kwargs.get("passthrough"): # NB: Passthrough arguments are either of type `str` or `shell_str` # (see self._validate): the former never need interpretation, and the latter do not # need interpretation when they have been provided directly via `sys.argv` as the # passthrough args have been. 
flag_vals.append( ListValueComponent(ListValueComponent.MODIFY, [*passthru_arg_strs], []) ) if len(flag_vals) > 1 and not (is_list_option(kwargs) or is_dict_option(kwargs)): raise ParseError( f"Multiple cmd line flags specified for option {dest} in {self._scope_str()}" ) flag_val = merge_in_rank(flag_vals) flag_details = None if flag_val is None else "from command-line flag" # Rank all available values. values_to_rank = [ (flag_val, flag_details), (env_val, env_details), (config_val, config_details), (config_default_val, config_details), (to_value_type(kwargs.get("default")), None), (None, None), ] # Note that ranked_vals will always have at least one element, and all elements will be # instances of RankedValue (so none will be None, although they may wrap a None value). ranked_vals = list(reversed(list(RankedValue.prioritized_iter(*values_to_rank)))) def group(value_component_type, process_val_func) -> list[RankedValue]: # We group any values that are merged together, so that the history can reflect # merges vs. replacements in a useful way. E.g., if we merge [a, b] and [c], # and then replace it with [d, e], the history will contain: # - [d, e] (from command-line flag) # - [a, b, c] (from env var, from config) # And similarly for dicts. grouped: list[list[RankedValue]] = [[]] for ranked_val in ranked_vals: if ranked_val.value and ranked_val.value.action == value_component_type.REPLACE: grouped.append([]) grouped[-1].append(ranked_val) return [ RankedValue( grp[-1].rank, process_val_func( value_component_type.merge( rv.value for rv in grp if rv.value is not None ).val ), ", ".join(rv.details for rv in grp if rv.details), ) for grp in grouped if grp ] if is_list_option(kwargs): def process_list(lst): return [self._convert_member_type(member_type, val) for val in lst] historic_ranked_vals = group(ListValueComponent, process_list) elif is_dict_option(kwargs): historic_ranked_vals = group(DictValueComponent, lambda x: x) else: historic_ranked_vals = ranked_vals value_history = OptionValueHistory(tuple(historic_ranked_vals)) # Helper function to check various validity constraints on final option values. def check_scalar_value(val): if val is None: return choices = kwargs.get("choices") if choices is None and "type" in kwargs: if inspect.isclass(type_arg) and issubclass(type_arg, Enum): choices = list(type_arg) if choices is not None and val not in choices: raise ParseError( f"`{val}` is not an allowed value for option {dest} in {self._scope_str()}. " f"Must be one of: {choices}" ) elif type_arg == file_option: check_file_exists(val) elif type_arg == dir_option: check_dir_exists(val) def check_file_exists(val) -> None: error_prefix = f"File value `{val}` for option `{dest}` in `{self._scope_str()}`" try: path = Path(val) path_with_buildroot = Path(get_buildroot(), val) except TypeError: raise ParseError(f"{error_prefix} cannot be parsed as a file path.") if not path.is_file() and not path_with_buildroot.is_file(): raise ParseError(f"{error_prefix} does not exist.") def check_dir_exists(val) -> None: error_prefix = f"Directory value `{val}` for option `{dest}` in `{self._scope_str()}`" try: path = Path(val) path_with_buildroot = Path(get_buildroot(), val) except TypeError: raise ParseError(f"{error_prefix} cannot be parsed as a directory path.") if not path.is_dir() and not path_with_buildroot.is_dir(): raise ParseError(f"{error_prefix} does not exist.") # Validate the final value. 
final_val = value_history.final_value if isinstance(final_val.value, list): for component in final_val.value: check_scalar_value(component) if inspect.isclass(member_type) and issubclass(member_type, Enum): if len(final_val.value) != len(set(final_val.value)): raise ParseError(f"Duplicate enum values specified in list: {final_val.value}") elif isinstance(final_val.value, dict): for component in final_val.value.values(): check_scalar_value(component) else: check_scalar_value(final_val.value) return value_history def _inverse_arg(self, arg: str) -> str | None: if not arg.startswith("--"): return None if arg.startswith("--no-"): raise BooleanOptionNameWithNo(self.scope, arg) return f"--no-{arg[2:]}" def _scope_str(self, scope: str | None = None) -> str: return self.scope_str(scope if scope is not None else self.scope) def __str__(self) -> str: return f"Parser({self._scope})"
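# A minimal sketch of the precedence that _compute_value applies above: a command-line
# flag beats an environment variable, which beats config, which beats the hardcoded
# default. resolve() is a hypothetical stand-in for illustration; Pants itself models
# this with RankedValue.prioritized_iter over Rank, plus merge logic for list and
# dict options that this sketch omits.
def resolve(flag=None, env=None, config=None, default=None):
    # The highest-precedence source that is set (non-None) wins.
    for source, value in (('flag', flag), ('env', env), ('config', config), ('default', default)):
        if value is not None:
            return source, value
    return 'none', None


assert resolve(env='from-env', config='from-config') == ('env', 'from-env')
assert resolve(default=3) == ('default', 3)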
frase = input('Type a sentence: ').strip().upper()
print(f'The letter A appears {frase.count('A')} times.')
print(f'The first A appears at position {frase.find('A') + 1}')
print(f'The last A appears at position {frase.rfind('A') + 1}')
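# Note on the quoting difference between the snippet above and the variant below:
# reusing the same quote character inside an f-string replacement field, as in
# frase.count('A') within a single-quoted f-string, is only valid on Python 3.12+
# (PEP 701); older interpreters raise a SyntaxError. Alternating the quote
# characters, as the next variant does, works on every Python with f-strings (3.6+).
frase_demo = 'BANANA'
print(f'count: {frase_demo.count("A")}')  # portable: alternate quote characters
# print(f'count: {frase_demo.count('A')}')  # same output, but needs Python >= 3.12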
frase = input('Type a sentence: ').strip().upper()
print(f'The letter A appears {frase.count("A")} times.')
print(f'The first A appears at position {frase.find("A") + 1}')
print(f'The last A appears at position {frase.rfind("A") + 1}')
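# A small generalization of the snippet above, assuming the goal is count/first/last
# statistics for an arbitrary letter. str.find and str.rfind return 0-based indexes
# (-1 on a miss), hence the +1 and the explicit miss check. letter_stats is a
# hypothetical helper sketched for illustration.
def letter_stats(text: str, letter: str) -> tuple:
    text = text.upper()
    first = text.find(letter)
    if first == -1:
        return 0, 0, 0  # the letter does not occur at all
    return text.count(letter), first + 1, text.rfind(letter) + 1


assert letter_stats('banana', 'A') == (3, 2, 6)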
import hashlib import os import smtplib import time import zipfile from wsgiref.util import FileWrapper from django.http import StreamingHttpResponse from django.utils import timezone from django.utils.decorators import method_decorator from django.views.decorators.csrf import ensure_csrf_cookie, csrf_exempt from django.conf import settings from account.models import User, AdminType from contest.models import Contest from judge.dispatcher import process_pending_task from judge.languages import languages, spj_languages from options.models import SysOptions as SysOptionsModel from options.options import SysOptions, OptionKeys from problem.models import Problem from submission.models import Submission, TestSubmission from utils.api import APIView, CSRFExemptAPIView, validate_serializer from utils.cache import cache from utils.constants import CacheKey from utils.shortcuts import send_email from utils.xss_filter import XSSHtml from .models import JudgeServer, DailyInfoStatus, BugCollections, AdviceCollection from .serializers import ( JudgeServerHeartbeatSerializer, CreateSMTPConfigSerializer, JudgeServerSerializer, BugSubmitSerializer, AdviceSubmitSerializer, TestSMTPConfigSerializer, EditJudgeServerSerializer) class BugSubmitAPI(APIView): @validate_serializer(BugSubmitSerializer) @csrf_exempt def post(self, request): req_body = request.data BugCollections.objects.create(**req_body) return self.success() def get(self, request): bug_list = BugCollections.objects.all().order_by("-bug_time") data = self.paginate_data(request, bug_list, BugSubmitSerializer) return self.success(data) class AdviceCollectAPI(APIView): @validate_serializer(AdviceSubmitSerializer) @csrf_exempt def post(self, request): req_body = request.data AdviceCollection.objects.create(**req_body) return self.success() def get(self, request): bug_list = AdviceCollection.objects.all() error_type = request.GET.get("error_type") if error_type: bug_list = bug_list.filter(bug_type=error_type) data = self.paginate_data(request, bug_list, AdviceSubmitSerializer) return self.success(data) class SMTPAPI(APIView): def get(self, request): smtp = SysOptions.smtp_config if not smtp: return self.success(None) return self.success(smtp) @validate_serializer(CreateSMTPConfigSerializer) def post(self, request): SysOptions.smtp_config = request.data return self.success() def put(self, request): smtp = SysOptions.smtp_config data = request.data for item in ["server", "port", "email", "tls"]: smtp[item] = data[item] if "password" in data: smtp["password"] = data["password"] SysOptions.smtp_config = smtp return self.success() class SMTPTestAPI(APIView): @validate_serializer(TestSMTPConfigSerializer) def post(self, request): if not SysOptions.smtp_config: return self.error("请先填写邮箱配置") try: send_email(smtp_config=SysOptions.smtp_config, from_name=SysOptions.website_name_shortcut, to_name=request.user.username, to_email=request.data["email"], subject="You have successfully configured SMTP", content="You have successfully configured SMTP") except smtplib.SMTPResponseException as e: # guess error message encoding msg = b"Failed to send email" try: msg = e.smtp_error # qq mail msg = msg.decode("gbk") except Exception: msg = msg.decode("utf-8", "ignore") return self.error(msg) except Exception as e: msg = str(e) return self.error(msg) return self.success() class WebsiteConfigAPI(APIView): @method_decorator(ensure_csrf_cookie) def get(self, request): config = ( "website_filing", "website_base_url", "website_head_logo", "website_logo", "website_name_shortcut", 
) ret = { key: getattr( SysOptions, key) for key in config} return self.success(ret) def post(self, request): for k, v in request.data.items(): if k == "website_footer": with XSSHtml() as parser: v = parser.clean(v) setattr(SysOptions, k, v) return self.success() class JudgeServerAPI(APIView): def get(self, request): servers = JudgeServer.objects.all().order_by("-last_heartbeat") return self.success({"token": SysOptions.judge_server_token, "servers": JudgeServerSerializer(servers, many=True).data}) def delete(self, request): hostname = request.GET.get("hostname") if hostname: if request.session.get("_u_type") == AdminType.SUPER_ADMIN: JudgeServer.objects.filter(hostname=hostname).delete() else: return self.error("你没有这个权限") return self.success() @validate_serializer(EditJudgeServerSerializer) def put(self, request): is_disabled = request.data.get("is_disabled", False) JudgeServer.objects.filter( id=request.data["id"]).update( is_disabled=is_disabled) if not is_disabled: process_pending_task() is_reload = request.data.get("is_reload") if is_reload: if request.session.get("_u_type") == AdminType.SUPER_ADMIN: JudgeServer.objects.filter( id=request.data["id"]).update(task_number=0) return self.success() class JudgeServerHeartbeatAPI(CSRFExemptAPIView): @validate_serializer(JudgeServerHeartbeatSerializer) def post(self, request): data = request.data client_token = request.META.get("HTTP_X_JUDGE_SERVER_TOKEN") if hashlib.sha256(SysOptions.judge_server_token.encode( "utf-8")).hexdigest() != client_token: return self.error("Invalid token") try: server = JudgeServer.objects.get(hostname=data["hostname"]) server.judger_version = data["judger_version"] server.cpu_core = data["cpu_core"] server.memory_usage = data["memory"] server.cpu_usage = data["cpu"] server.service_url = data["service_url"] server.ip = request.META["REMOTE_ADDR"] server.last_heartbeat = timezone.now() server.save( update_fields=[ "judger_version", "cpu_core", "cpu_usage", "memory_usage", "service_url", "ip", "last_heartbeat"]) except JudgeServer.DoesNotExist: JudgeServer.objects.create(hostname=data["hostname"], judger_version=data["judger_version"], cpu_core=data["cpu_core"], memory_usage=data["memory"], cpu_usage=data["cpu"], ip=request.META["REMOTE_ADDR"], service_url=data["service_url"], last_heartbeat=timezone.now(), ) # 新server上线 处理队列中的,防止没有新的提交而导致一直waiting process_pending_task() return self.success() class LanguagesAPI(APIView): def get(self, request): return self.success( {"languages": languages, "spj_languages": spj_languages}) class DailyInfoStatusAPI(APIView): def daily_data(self, fields, limit=7, start_time="", end_time=""): values = DailyInfoStatus.objects.values(*fields) if start_time: values = values.filter(create_time__gte=start_time, create_time__lt=end_time) count = values.count() result = dict(count=count) for k in fields: result[k] = list() if not start_time and count >= limit: values = values[count - limit:] for val in values: for k in fields[:-1]: result[k].append(val[k]) result["create_time"].append(val["create_time"].strftime("%m-%d")) return result def get(self, request): limit = int(request.GET.get("limit", 0)) keyword = request.GET.get("keyword") start_time = request.GET.get("start_time") end_time = request.GET.get("end_time") is_cache = False if keyword: fields = (keyword, "create_time",) else: is_cache = True fields = ( "sub_count", "con_count", "accept_count", "active_count", "create_time",) if is_cache and not start_time: cache_key = f"{CacheKey.daily_result}:{time.strftime("%Y-%m-%d", 
time.localtime())}:{limit}" data = cache.get(cache_key) if not data: data = self.daily_data(fields, limit, start_time, end_time) cache.set(cache_key, data, timeout=3600 * 5) else: data = self.daily_data(fields, limit, start_time, end_time) return self.success(data=data) class UserInfoMatchRuleAPI(APIView): def get(self, request): key = OptionKeys.info_match_rule data = SysOptionsModel.objects.filter(key=key).values("value") if not data.exists(): data = {} data = data[0].get("value") return self.success(data) def post(self, request): data = request.data match_rule = data.get("match_rule") if match_rule: key = OptionKeys.info_match_rule r = SysOptionsModel.objects.filter( key=key).update(value=match_rule) if not r: return self.error("失败") else: return self.error("请确保数据的完整性") return self.success() class SchoolConfigRuleAPI(APIView): def post(self, request): which_one = request.data.pop("which_one") if which_one == 'school_detail': info = request.data.pop('school_detail', None) if not info: return self.error() SysOptions.school = info.get("name", "中国人的大学") SysOptions.school_detail = info else: info = request.data.pop('school_manager', None) if not info: return self.error() SysOptions.school_manager = info return self.success() def get(self, request): school_info = dict() school_info['school_detail'] = SysOptions.school_detail school_info['school_manager'] = SysOptions.school_manager return self.success(data=school_info) class TotalDataAPI(APIView): def get(self, request): r = self.__total_data() return self.success(data=r) def __total_data(self): total = dict() total['submit'] = Submission.objects.count() total['users'] = User.objects.filter(is_auth=True).count() total['contest'] = Contest.objects.filter(is_contest=True).count() total['problems'] = Problem.objects.count() total['test_submit'] = TestSubmission.objects.count() return total class TestCaseUnpackAPI(APIView): def zip_test_cases(self, test_case_path): start_dir = test_case_path # 要压缩的文件夹路径 file_news = test_case_path + '.zip' # 压缩后文件夹的名字 z = zipfile.ZipFile(file_news, 'w', zipfile.ZIP_DEFLATED) for dir_path, dir_names, file_names in os.walk(start_dir): f_path = dir_path.replace(start_dir, '') # 这一句很重要,不replace的话,就从根目录开始复制 f_path = f_path and f_path + os.sep or '' # 实现当前文件夹以及包含的所有文件的压缩 for filename in file_names: z.write(os.path.join(dir_path, filename), f_path + filename) z.close() return file_news def get(self, request): test_case_path = settings.TEST_CASE_DIR zip_path = self.zip_test_cases(test_case_path) response = StreamingHttpResponse( FileWrapper( open( zip_path, "rb")), content_type="application/octet-stream") response["Content-Disposition"] = f"attachment; filename=test_cases.zip" response["Content-Length"] = os.path.getsize(zip_path) return response
import hashlib import os import smtplib import time import zipfile from wsgiref.util import FileWrapper from django.http import StreamingHttpResponse from django.utils import timezone from django.utils.decorators import method_decorator from django.views.decorators.csrf import ensure_csrf_cookie, csrf_exempt from django.conf import settings from account.models import User, AdminType from contest.models import Contest from judge.dispatcher import process_pending_task from judge.languages import languages, spj_languages from options.models import SysOptions as SysOptionsModel from options.options import SysOptions, OptionKeys from problem.models import Problem from submission.models import Submission, TestSubmission from utils.api import APIView, CSRFExemptAPIView, validate_serializer from utils.cache import cache from utils.constants import CacheKey from utils.shortcuts import send_email from utils.xss_filter import XSSHtml from .models import JudgeServer, DailyInfoStatus, BugCollections, AdviceCollection from .serializers import ( JudgeServerHeartbeatSerializer, CreateSMTPConfigSerializer, JudgeServerSerializer, BugSubmitSerializer, AdviceSubmitSerializer, TestSMTPConfigSerializer, EditJudgeServerSerializer) class BugSubmitAPI(APIView): @validate_serializer(BugSubmitSerializer) @csrf_exempt def post(self, request): req_body = request.data BugCollections.objects.create(**req_body) return self.success() def get(self, request): bug_list = BugCollections.objects.all().order_by("-bug_time") data = self.paginate_data(request, bug_list, BugSubmitSerializer) return self.success(data) class AdviceCollectAPI(APIView): @validate_serializer(AdviceSubmitSerializer) @csrf_exempt def post(self, request): req_body = request.data AdviceCollection.objects.create(**req_body) return self.success() def get(self, request): bug_list = AdviceCollection.objects.all() error_type = request.GET.get("error_type") if error_type: bug_list = bug_list.filter(bug_type=error_type) data = self.paginate_data(request, bug_list, AdviceSubmitSerializer) return self.success(data) class SMTPAPI(APIView): def get(self, request): smtp = SysOptions.smtp_config if not smtp: return self.success(None) return self.success(smtp) @validate_serializer(CreateSMTPConfigSerializer) def post(self, request): SysOptions.smtp_config = request.data return self.success() def put(self, request): smtp = SysOptions.smtp_config data = request.data for item in ["server", "port", "email", "tls"]: smtp[item] = data[item] if "password" in data: smtp["password"] = data["password"] SysOptions.smtp_config = smtp return self.success() class SMTPTestAPI(APIView): @validate_serializer(TestSMTPConfigSerializer) def post(self, request): if not SysOptions.smtp_config: return self.error("请先填写邮箱配置") try: send_email(smtp_config=SysOptions.smtp_config, from_name=SysOptions.website_name_shortcut, to_name=request.user.username, to_email=request.data["email"], subject="You have successfully configured SMTP", content="You have successfully configured SMTP") except smtplib.SMTPResponseException as e: # guess error message encoding msg = b"Failed to send email" try: msg = e.smtp_error # qq mail msg = msg.decode("gbk") except Exception: msg = msg.decode("utf-8", "ignore") return self.error(msg) except Exception as e: msg = str(e) return self.error(msg) return self.success() class WebsiteConfigAPI(APIView): @method_decorator(ensure_csrf_cookie) def get(self, request): config = ( "website_filing", "website_base_url", "website_head_logo", "website_logo", "website_name_shortcut", 
) ret = { key: getattr( SysOptions, key) for key in config} return self.success(ret) def post(self, request): for k, v in request.data.items(): if k == "website_footer": with XSSHtml() as parser: v = parser.clean(v) setattr(SysOptions, k, v) return self.success() class JudgeServerAPI(APIView): def get(self, request): servers = JudgeServer.objects.all().order_by("-last_heartbeat") return self.success({"token": SysOptions.judge_server_token, "servers": JudgeServerSerializer(servers, many=True).data}) def delete(self, request): hostname = request.GET.get("hostname") if hostname: if request.session.get("_u_type") == AdminType.SUPER_ADMIN: JudgeServer.objects.filter(hostname=hostname).delete() else: return self.error("你没有这个权限") return self.success() @validate_serializer(EditJudgeServerSerializer) def put(self, request): is_disabled = request.data.get("is_disabled", False) JudgeServer.objects.filter( id=request.data["id"]).update( is_disabled=is_disabled) if not is_disabled: process_pending_task() is_reload = request.data.get("is_reload") if is_reload: if request.session.get("_u_type") == AdminType.SUPER_ADMIN: JudgeServer.objects.filter( id=request.data["id"]).update(task_number=0) return self.success() class JudgeServerHeartbeatAPI(CSRFExemptAPIView): @validate_serializer(JudgeServerHeartbeatSerializer) def post(self, request): data = request.data client_token = request.META.get("HTTP_X_JUDGE_SERVER_TOKEN") if hashlib.sha256(SysOptions.judge_server_token.encode( "utf-8")).hexdigest() != client_token: return self.error("Invalid token") try: server = JudgeServer.objects.get(hostname=data["hostname"]) server.judger_version = data["judger_version"] server.cpu_core = data["cpu_core"] server.memory_usage = data["memory"] server.cpu_usage = data["cpu"] server.service_url = data["service_url"] server.ip = request.META["REMOTE_ADDR"] server.last_heartbeat = timezone.now() server.save( update_fields=[ "judger_version", "cpu_core", "cpu_usage", "memory_usage", "service_url", "ip", "last_heartbeat"]) except JudgeServer.DoesNotExist: JudgeServer.objects.create(hostname=data["hostname"], judger_version=data["judger_version"], cpu_core=data["cpu_core"], memory_usage=data["memory"], cpu_usage=data["cpu"], ip=request.META["REMOTE_ADDR"], service_url=data["service_url"], last_heartbeat=timezone.now(), ) # 新server上线 处理队列中的,防止没有新的提交而导致一直waiting process_pending_task() return self.success() class LanguagesAPI(APIView): def get(self, request): return self.success( {"languages": languages, "spj_languages": spj_languages}) class DailyInfoStatusAPI(APIView): def daily_data(self, fields, limit=7, start_time="", end_time=""): values = DailyInfoStatus.objects.values(*fields) if start_time: values = values.filter(create_time__gte=start_time, create_time__lt=end_time) count = values.count() result = dict(count=count) for k in fields: result[k] = list() if not start_time and count >= limit: values = values[count - limit:] for val in values: for k in fields[:-1]: result[k].append(val[k]) result["create_time"].append(val["create_time"].strftime("%m-%d")) return result def get(self, request): limit = int(request.GET.get("limit", 0)) keyword = request.GET.get("keyword") start_time = request.GET.get("start_time") end_time = request.GET.get("end_time") is_cache = False if keyword: fields = (keyword, "create_time",) else: is_cache = True fields = ( "sub_count", "con_count", "accept_count", "active_count", "create_time",) if is_cache and not start_time: cache_key = f"{CacheKey.daily_result}:{time.strftime('%Y-%m-%d', 
time.localtime())}:{limit}" data = cache.get(cache_key) if not data: data = self.daily_data(fields, limit, start_time, end_time) cache.set(cache_key, data, timeout=3600 * 5) else: data = self.daily_data(fields, limit, start_time, end_time) return self.success(data=data) class UserInfoMatchRuleAPI(APIView): def get(self, request): key = OptionKeys.info_match_rule data = SysOptionsModel.objects.filter(key=key).values("value") if not data.exists(): data = {} data = data[0].get("value") return self.success(data) def post(self, request): data = request.data match_rule = data.get("match_rule") if match_rule: key = OptionKeys.info_match_rule r = SysOptionsModel.objects.filter( key=key).update(value=match_rule) if not r: return self.error("失败") else: return self.error("请确保数据的完整性") return self.success() class SchoolConfigRuleAPI(APIView): def post(self, request): which_one = request.data.pop("which_one") if which_one == 'school_detail': info = request.data.pop('school_detail', None) if not info: return self.error() SysOptions.school = info.get("name", "中国人的大学") SysOptions.school_detail = info else: info = request.data.pop('school_manager', None) if not info: return self.error() SysOptions.school_manager = info return self.success() def get(self, request): school_info = dict() school_info['school_detail'] = SysOptions.school_detail school_info['school_manager'] = SysOptions.school_manager return self.success(data=school_info) class TotalDataAPI(APIView): def get(self, request): r = self.__total_data() return self.success(data=r) def __total_data(self): total = dict() total['submit'] = Submission.objects.count() total['users'] = User.objects.filter(is_auth=True).count() total['contest'] = Contest.objects.filter(is_contest=True).count() total['problems'] = Problem.objects.count() total['test_submit'] = TestSubmission.objects.count() return total class TestCaseUnpackAPI(APIView): def zip_test_cases(self, test_case_path): start_dir = test_case_path # 要压缩的文件夹路径 file_news = test_case_path + '.zip' # 压缩后文件夹的名字 z = zipfile.ZipFile(file_news, 'w', zipfile.ZIP_DEFLATED) for dir_path, dir_names, file_names in os.walk(start_dir): f_path = dir_path.replace(start_dir, '') # 这一句很重要,不replace的话,就从根目录开始复制 f_path = f_path and f_path + os.sep or '' # 实现当前文件夹以及包含的所有文件的压缩 for filename in file_names: z.write(os.path.join(dir_path, filename), f_path + filename) z.close() return file_news def get(self, request): test_case_path = settings.TEST_CASE_DIR zip_path = self.zip_test_cases(test_case_path) response = StreamingHttpResponse( FileWrapper( open( zip_path, "rb")), content_type="application/octet-stream") response["Content-Disposition"] = f"attachment; filename=test_cases.zip" response["Content-Length"] = os.path.getsize(zip_path) return response
from itertools import chain from typing import List from collections import namedtuple, defaultdict import logging logger = logging.getLogger() EC2Url = namedtuple('EC2Url', ['url', 'instance_id']) def get_ec2_urls(ec2_client) -> List[EC2Url]: """ Returns a collection of EC2 instance URL addresses that are exposed to the internet. :param ec2_client: botocore.client.EC2 """ urls = set() resevs = list(chain(*(page['Reservations'] for page in ec2_client.get_paginator('describe_instances').paginate( Filters=[ { 'Name': 'instance-state-name', 'Values': ['running'] } ])))) group_ids_to_instances = defaultdict(list) for instances in resevs: for instance_data in instances['Instances']: i_id = instance_data['InstanceId'] for network_inr in instance_data['NetworkInterfaces']: association = network_inr.get('Association') if not association: continue public_ip = association.get('PublicIp') if public_ip is None: continue # only collect public ip addresses for group in network_inr.get('Groups', []): group_ids_to_instances[group['GroupId']].append((public_ip, i_id)) if not group_ids_to_instances: return list(urls) group_ids_to_instances = dict(group_ids_to_instances) sec_groups = list( chain(*(page['SecurityGroups'] for page in ec2_client.get_paginator('describe_security_groups').paginate( GroupIds=list(group_ids_to_instances.keys()) )))) for sec in sec_groups: for ip_prem in sec['IpPermissions']: if ip_prem.get('FromPort', -1) == -1: continue # skip rules without a concrete port (e.g. all-traffic rules, where FromPort is absent) for ip_range in ip_prem['IpRanges']: if ip_range['CidrIp'].startswith("0.0.0.0/"): for ec2_info in group_ids_to_instances[sec['GroupId']]: urls.add(EC2Url(f'http://{ec2_info[0]}:{ip_prem['FromPort']}/', ec2_info[1])) urls.add(EC2Url(f'https://{ec2_info[0]}:{ip_prem['FromPort']}/', ec2_info[1])) return list(urls) LoadBalancerUrl = namedtuple('LoadBalancerUrl', ['url', 'identifier', 'header', 'explicit_method', 'params']) def _get_string_from_reg(s: str) -> str: example_string = s.replace('?', 'a') return example_string.replace('*', 'test') def get_load_balancers(elb_client) -> List[LoadBalancerUrl]: """ Returns a collection of load balancer URL addresses that are exposed to the internet. :param elb_client: botocore.client.ELB2 """ results = list() response = elb_client.describe_load_balancers() lb_info = response['LoadBalancers'] for lb in lb_info: if lb['Scheme'] != 'internet-facing': continue dns_name = lb['DNSName'] load_balancer_identifier = lb['LoadBalancerName'] resp_listener = elb_client.describe_listeners(LoadBalancerArn=lb['LoadBalancerArn']) for listener in resp_listener['Listeners']: port = listener['Port'] if listener['Protocol'] == 'HTTP': protocols = ['http'] elif listener['Protocol'] == 'HTTPS': protocols = ['https'] else: protocols = ['http', 'https'] rules = elb_client.describe_rules(ListenerArn=listener['ListenerArn'])['Rules'] if not rules: for p in protocols: results.append(LoadBalancerUrl( url=f'{p}://{dns_name}:{port}/', identifier=load_balancer_identifier, header={}, explicit_method=None, params={} )) for rule in rules: for action in rule.get('Actions', []): if action['Type'] in ('fixed-response', 'authenticate-oidc', 'authenticate-cognito'): continue # We don't want to check the URI if the LB's target is one of those action types.
subdomain = "" params = {} explicit_request_method = None uri_to_append = '/' headers_needed = {} for condition in rule.get('Conditions', []): if condition['Field'] == 'http-header': header_config = condition['HttpHeaderConfig'] headers_needed[header_config['HttpHeaderName']] = _get_string_from_reg(header_config['Values'][0]) elif condition['Field'] == 'path-pattern': path_config = condition['PathPatternConfig'] uri_to_append = _get_string_from_reg(path_config['Values'][0]) if uri_to_append[0] != '/': uri_to_append = '/' + uri_to_append elif condition['Field'] == 'host-header': host_config = condition['HostHeaderConfig'] subdomain = f'{_get_string_from_reg(host_config['Values'][0])}.' elif condition['Field'] == 'query-string': query_config = condition['QueryStringConfig'] for val in query_config["Values"]: if 'Key' in val: params[val['Key']] = _get_string_from_reg(val['Value']) else: params['test'] = _get_string_from_reg(val['Value']) elif condition['Field'] == 'http-request-method': request_config = condition['HttpRequestMethodConfig'] explicit_request_method = request_config["Values"][0] else: continue for p in protocols: results.append(LoadBalancerUrl( url=f'{p}://{subdomain}{dns_name}:{port}{uri_to_append}', identifier=load_balancer_identifier, header=headers_needed, explicit_method=explicit_request_method, params=params )) return results
from itertools import chain from typing import List from collections import namedtuple, defaultdict import logging logger = logging.getLogger() EC2Url = namedtuple('EC2Url', ['url', 'instance_id']) def get_ec2_urls(ec2_client) -> List[EC2Url]: """ Returns a collection of EC2 instance URL addresses that are exposed to the internet. :param ec2_client: botocore.client.EC2 """ urls = set() resevs = list(chain(*(page['Reservations'] for page in ec2_client.get_paginator('describe_instances').paginate( Filters=[ { 'Name': 'instance-state-name', 'Values': ['running'] } ])))) group_ids_to_instances = defaultdict(list) for instances in resevs: for instance_data in instances['Instances']: i_id = instance_data['InstanceId'] for network_inr in instance_data['NetworkInterfaces']: association = network_inr.get('Association') if not association: continue public_ip = association.get('PublicIp') if public_ip is None: continue # only collect public ip addresses for group in network_inr.get('Groups', []): group_ids_to_instances[group['GroupId']].append((public_ip, i_id)) if not group_ids_to_instances: return list(urls) group_ids_to_instances = dict(group_ids_to_instances) sec_groups = list( chain(*(page['SecurityGroups'] for page in ec2_client.get_paginator('describe_security_groups').paginate( GroupIds=list(group_ids_to_instances.keys()) )))) for sec in sec_groups: for ip_prem in sec['IpPermissions']: if ip_prem.get('FromPort', -1) == -1: continue # skip rules without a concrete port (e.g. all-traffic rules, where FromPort is absent) for ip_range in ip_prem['IpRanges']: if ip_range['CidrIp'].startswith("0.0.0.0/"): for ec2_info in group_ids_to_instances[sec['GroupId']]: urls.add(EC2Url(f'http://{ec2_info[0]}:{ip_prem["FromPort"]}/', ec2_info[1])) urls.add(EC2Url(f'https://{ec2_info[0]}:{ip_prem["FromPort"]}/', ec2_info[1])) return list(urls) LoadBalancerUrl = namedtuple('LoadBalancerUrl', ['url', 'identifier', 'header', 'explicit_method', 'params']) def _get_string_from_reg(s: str) -> str: example_string = s.replace('?', 'a') return example_string.replace('*', 'test') def get_load_balancers(elb_client) -> List[LoadBalancerUrl]: """ Returns a collection of load balancer URL addresses that are exposed to the internet. :param elb_client: botocore.client.ELB2 """ results = list() response = elb_client.describe_load_balancers() lb_info = response['LoadBalancers'] for lb in lb_info: if lb['Scheme'] != 'internet-facing': continue dns_name = lb['DNSName'] load_balancer_identifier = lb['LoadBalancerName'] resp_listener = elb_client.describe_listeners(LoadBalancerArn=lb['LoadBalancerArn']) for listener in resp_listener['Listeners']: port = listener['Port'] if listener['Protocol'] == 'HTTP': protocols = ['http'] elif listener['Protocol'] == 'HTTPS': protocols = ['https'] else: protocols = ['http', 'https'] rules = elb_client.describe_rules(ListenerArn=listener['ListenerArn'])['Rules'] if not rules: for p in protocols: results.append(LoadBalancerUrl( url=f'{p}://{dns_name}:{port}/', identifier=load_balancer_identifier, header={}, explicit_method=None, params={} )) for rule in rules: for action in rule.get('Actions', []): if action['Type'] in ('fixed-response', 'authenticate-oidc', 'authenticate-cognito'): continue # We don't want to check the URI if the LB's target is one of those action types.
subdomain = "" params = {} explicit_request_method = None uri_to_append = '/' headers_needed = {} for condition in rule.get('Conditions', []): if condition['Field'] == 'http-header': header_config = condition['HttpHeaderConfig'] headers_needed[header_config['HttpHeaderName']] = _get_string_from_reg(header_config['Values'][0]) elif condition['Field'] == 'path-pattern': path_config = condition['PathPatternConfig'] uri_to_append = _get_string_from_reg(path_config['Values'][0]) if uri_to_append[0] != '/': uri_to_append = '/' + uri_to_append elif condition['Field'] == 'host-header': host_config = condition['HostHeaderConfig'] subdomain = f'{_get_string_from_reg(host_config["Values"][0])}.' elif condition['Field'] == 'query-string': query_config = condition['QueryStringConfig'] for val in query_config["Values"]: if 'Key' in val: params[val['Key']] = _get_string_from_reg(val['Value']) else: params['test'] = _get_string_from_reg(val['Value']) elif condition['Field'] == 'http-request-method': request_config = condition['HttpRequestMethodConfig'] explicit_request_method = request_config["Values"][0] else: continue for p in protocols: results.append(LoadBalancerUrl( url=f'{p}://{subdomain}{dns_name}:{port}{uri_to_append}', identifier=load_balancer_identifier, header=headers_needed, explicit_method=explicit_request_method, params=params )) return results
import itertools import json import time import click from flask import current_app from flask.cli import with_appcontext from app.common import client from app.common import datasets as datasets_fcts from app.common import path from app.common.projection import epsg_string_to_proj4 from app.models import geofile, storage @click.command("update-all-datasets") @with_appcontext def update_all_datasets(): datasets = client.get_dataset_list(disable_filtering=True) for dataset in datasets: process_dataset(dataset) @click.command("update-dataset") @click.argument("ds_id") @click.option("-p", "--prettyprint", is_flag=True) @with_appcontext def update_dataset(ds_id, prettyprint): datasets = client.get_dataset_list(disable_filtering=True) datasets = [x for x in datasets if x["ds_id"] == int(ds_id)] if len(datasets) == 1: process_dataset(datasets[0], prettyprint) @click.command("update-areas") @with_appcontext def update_areas(): areas = client.get_areas() for area in areas: process_area(area["id"]) @click.command("list-datasets") @click.option("-p", "--prettyprint", is_flag=True) @with_appcontext def list_datasets(prettyprint): datasets = client.get_dataset_list(disable_filtering=True, pretty_print=prettyprint) result = "\n" for dataset in datasets: type = "raster" if dataset["is_raster"] else "vector" result += f"- {dataset["ds_id"]}: {dataset["title"]} ({type})\n" current_app.logger.info(result) @click.command("get-parameters") @click.argument("ds_id") @click.option("--processing", is_flag=True) @click.option("-p", "--prettyprint", is_flag=True) @with_appcontext def get_parameters(ds_id, processing, prettyprint): def _process_list(result, parameters, name, label): if len(parameters[name]) > 0: if result != "\n": result += "\n" result += f"{label}:\n" for value in parameters[name]: result += f"- {value}\n" return result def _process_dict(result, parameters, name, label): if len(parameters[name]) > 0: if result != "\n": result += "\n" result += f"{label}:\n" for key, value in parameters[name].items(): result += f"- {key}: {value}\n" return result def _process_variable(result, parameters, name, label): if parameters[name] is not None: if result != "\n": result += "\n" result += f"{label}: {parameters[name]}\n" return result def _display_parameters(result, parameters): result = _process_list(result, parameters, "variables", "Variables") result = _process_list(result, parameters, "time_periods", "Time periods") result = _process_dict(result, parameters, "fields", "Fields") result = _process_list(result, parameters, "levels", "Levels") result = _process_variable( result, parameters, "temporal_granularity", "Temporal granularity" ) result = _process_variable(result, parameters, "start_at", "Start date") result = _process_variable(result, parameters, "end_at", "End date") result = _process_dict( result, parameters, "default_parameters", "Default parameters" ) return result datasets = client.get_dataset_list(disable_filtering=True) datasets = [x for x in datasets if x["ds_id"] == int(ds_id)] if len(datasets) == 1: parameters = client.get_parameters( datasets[0]["ds_id"], pretty_print=prettyprint ) result = "\n" result = _display_parameters(result, parameters) if result == "\n": result += "No variable nor time period" elif processing: result += "\n------------->\n" datasets_fcts.process_parameters(parameters) result = _display_parameters(result, parameters) current_app.logger.info(result) @click.command("get-legend") @click.argument("ds_id") @click.option("-v", "--variable", default=None) 
@click.option("-t", "--time_period", default=None) @click.option("-p", "--prettyprint", is_flag=True) @with_appcontext def get_legend(ds_id, variable, time_period, prettyprint): datasets = client.get_dataset_list(disable_filtering=True) datasets = [x for x in datasets if x["ds_id"] == int(ds_id)] if len(datasets) == 1: dataset = datasets[0] type = "raster" if dataset["is_raster"] else "vector" layer_name = path.make_unique_layer_name( type, ds_id, variable=variable, time_period=time_period ) legend = client.get_legend(layer_name, pretty_print=prettyprint) result = "\n" result += json.dumps(legend, indent=4) current_app.logger.info(result) def process_dataset(dataset, pretty_print=False): type = path.RASTER if dataset["is_raster"] else path.VECTOR # Retrieve the variables of the dataset parameters = client.get_parameters(dataset["ds_id"]) datasets_fcts.process_parameters(parameters) # For vector datasets, we want to retrieve all variables at once if type == path.VECTOR: parameters["variables"] = [] # Iterate over all combinations of variables and time_periods raster_file = None if (len(parameters["variables"]) > 0) and (len(parameters["time_periods"]) > 0): for variable, time_period in itertools.product( parameters["variables"], parameters["time_periods"] ): _, raster_file = process_layer( type, dataset["ds_id"], variable=variable, time_period=time_period, pretty_print=pretty_print, ) elif len(parameters["variables"]) > 0: for variable in parameters["variables"]: _, raster_file = process_layer( type, dataset["ds_id"], variable=variable, pretty_print=pretty_print ) elif len(parameters["time_periods"]) > 0: valid_combinations = {} for time_period in parameters["time_periods"]: valid_variables, raster_file = process_layer( type, dataset["ds_id"], time_period=time_period, pretty_print=pretty_print, ) if valid_variables is not None: if time_period not in valid_combinations: valid_combinations[time_period] = [] valid_combinations[time_period].extend(valid_variables) if (type == path.VECTOR) and (len(valid_combinations) > 0): layer_name = path.make_unique_layer_name(type, dataset["ds_id"]) storage_instance = storage.create(layer_name) with open( storage_instance.get_combinations_file(layer_name), "w", ) as f: json.dump(valid_combinations, f) else: _, raster_file = process_layer( type, dataset["ds_id"], pretty_print=pretty_print ) # For raster datasets, save the projection in a file if type == path.RASTER: current_app.logger.info("... save projection") layer_name = path.make_unique_layer_name(type, dataset["ds_id"]) geofile.save_raster_projection( layer_name, epsg_string_to_proj4(current_app.config["RASTER_PROJECTION_SYSTEM"]), ) def process_layer(type, id, variable=None, time_period=None, pretty_print=False): layer_name = path.make_unique_layer_name( type, id, variable=variable, time_period=time_period ) current_app.logger.info(f"Download geojson <{layer_name}>...") time_started = time.time() data = client.get_geojson(layer_name, pretty_print) if (data is None) or (data["features"] is None) or (len(data["features"]) == 0): return None time_fetched = time.time() current_app.logger.info( f"... fetch done in {int(time_fetched - time_started)} seconds" ) geofile.delete_all_features(layer_name) valid_variables = None raster_file = None if type == path.VECTOR: valid_variables = geofile.save_vector_geojson(layer_name, data) else: current_app.logger.info("... 
save geometries") geofile.save_raster_geometries(layer_name, data) # Don't download raster files if we have directly access to them if current_app.config["RASTER_CACHE_DIR"] is None: for feature in data["features"]: feature_id = feature["id"] current_app.logger.info(f"... download raster file <{feature_id}>") raster_content = client.get_raster_file(id, feature_id) if raster_content is not None: geofile.save_raster_file(layer_name, feature_id, raster_content) # Retrieve the name of one raster file, in case we need to retrieve infos from it if len(data["features"]) > 0: storage_instance = storage.create_for_layer_type(type) raster_file = storage_instance.get_file_path( layer_name, data["features"][0]["id"] ) time_saved = time.time() current_app.logger.info( f"... save done in {int(time_saved - time_fetched)} seconds." ) return valid_variables, raster_file def process_area(id): layer_name = path.make_unique_layer_name(path.AREA, id) current_app.logger.info(f"Download area <{id}>...") time_started = time.time() data = client.get_area(id) time_fetched = time.time() current_app.logger.info( f"... fetch done in {int(time_fetched - time_started)} seconds" ) geofile.save_vector_geojson(layer_name, data) time_saved = time.time() current_app.logger.info( f"... save done in {int(time_saved - time_fetched)} seconds." )
import itertools import json import time import click from flask import current_app from flask.cli import with_appcontext from app.common import client from app.common import datasets as datasets_fcts from app.common import path from app.common.projection import epsg_string_to_proj4 from app.models import geofile, storage @click.command("update-all-datasets") @with_appcontext def update_all_datasets(): datasets = client.get_dataset_list(disable_filtering=True) for dataset in datasets: process_dataset(dataset) @click.command("update-dataset") @click.argument("ds_id") @click.option("-p", "--prettyprint", is_flag=True) @with_appcontext def update_dataset(ds_id, prettyprint): datasets = client.get_dataset_list(disable_filtering=True) datasets = [x for x in datasets if x["ds_id"] == int(ds_id)] if len(datasets) == 1: process_dataset(datasets[0], prettyprint) @click.command("update-areas") @with_appcontext def update_areas(): areas = client.get_areas() for area in areas: process_area(area["id"]) @click.command("list-datasets") @click.option("-p", "--prettyprint", is_flag=True) @with_appcontext def list_datasets(prettyprint): datasets = client.get_dataset_list(disable_filtering=True, pretty_print=prettyprint) result = "\n" for dataset in datasets: type = "raster" if dataset["is_raster"] else "vector" result += f"- {dataset['ds_id']}: {dataset['title']} ({type})\n" current_app.logger.info(result) @click.command("get-parameters") @click.argument("ds_id") @click.option("--processing", is_flag=True) @click.option("-p", "--prettyprint", is_flag=True) @with_appcontext def get_parameters(ds_id, processing, prettyprint): def _process_list(result, parameters, name, label): if len(parameters[name]) > 0: if result != "\n": result += "\n" result += f"{label}:\n" for value in parameters[name]: result += f"- {value}\n" return result def _process_dict(result, parameters, name, label): if len(parameters[name]) > 0: if result != "\n": result += "\n" result += f"{label}:\n" for key, value in parameters[name].items(): result += f"- {key}: {value}\n" return result def _process_variable(result, parameters, name, label): if parameters[name] is not None: if result != "\n": result += "\n" result += f"{label}: {parameters[name]}\n" return result def _display_parameters(result, parameters): result = _process_list(result, parameters, "variables", "Variables") result = _process_list(result, parameters, "time_periods", "Time periods") result = _process_dict(result, parameters, "fields", "Fields") result = _process_list(result, parameters, "levels", "Levels") result = _process_variable( result, parameters, "temporal_granularity", "Temporal granularity" ) result = _process_variable(result, parameters, "start_at", "Start date") result = _process_variable(result, parameters, "end_at", "End date") result = _process_dict( result, parameters, "default_parameters", "Default parameters" ) return result datasets = client.get_dataset_list(disable_filtering=True) datasets = [x for x in datasets if x["ds_id"] == int(ds_id)] if len(datasets) == 1: parameters = client.get_parameters( datasets[0]["ds_id"], pretty_print=prettyprint ) result = "\n" result = _display_parameters(result, parameters) if result == "\n": result += "No variable nor time period" elif processing: result += "\n------------->\n" datasets_fcts.process_parameters(parameters) result = _display_parameters(result, parameters) current_app.logger.info(result) @click.command("get-legend") @click.argument("ds_id") @click.option("-v", "--variable", default=None) 
@click.option("-t", "--time_period", default=None) @click.option("-p", "--prettyprint", is_flag=True) @with_appcontext def get_legend(ds_id, variable, time_period, prettyprint): datasets = client.get_dataset_list(disable_filtering=True) datasets = [x for x in datasets if x["ds_id"] == int(ds_id)] if len(datasets) == 1: dataset = datasets[0] type = "raster" if dataset["is_raster"] else "vector" layer_name = path.make_unique_layer_name( type, ds_id, variable=variable, time_period=time_period ) legend = client.get_legend(layer_name, pretty_print=prettyprint) result = "\n" result += json.dumps(legend, indent=4) current_app.logger.info(result) def process_dataset(dataset, pretty_print=False): type = path.RASTER if dataset["is_raster"] else path.VECTOR # Retrieve the variables of the dataset parameters = client.get_parameters(dataset["ds_id"]) datasets_fcts.process_parameters(parameters) # For vector datasets, we want to retrieve all variables at once if type == path.VECTOR: parameters["variables"] = [] # Iterate over all combinations of variables and time_periods raster_file = None if (len(parameters["variables"]) > 0) and (len(parameters["time_periods"]) > 0): for variable, time_period in itertools.product( parameters["variables"], parameters["time_periods"] ): _, raster_file = process_layer( type, dataset["ds_id"], variable=variable, time_period=time_period, pretty_print=pretty_print, ) elif len(parameters["variables"]) > 0: for variable in parameters["variables"]: _, raster_file = process_layer( type, dataset["ds_id"], variable=variable, pretty_print=pretty_print ) elif len(parameters["time_periods"]) > 0: valid_combinations = {} for time_period in parameters["time_periods"]: valid_variables, raster_file = process_layer( type, dataset["ds_id"], time_period=time_period, pretty_print=pretty_print, ) if valid_variables is not None: if time_period not in valid_combinations: valid_combinations[time_period] = [] valid_combinations[time_period].extend(valid_variables) if (type == path.VECTOR) and (len(valid_combinations) > 0): layer_name = path.make_unique_layer_name(type, dataset["ds_id"]) storage_instance = storage.create(layer_name) with open( storage_instance.get_combinations_file(layer_name), "w", ) as f: json.dump(valid_combinations, f) else: _, raster_file = process_layer( type, dataset["ds_id"], pretty_print=pretty_print ) # For raster datasets, save the projection in a file if type == path.RASTER: current_app.logger.info("... save projection") layer_name = path.make_unique_layer_name(type, dataset["ds_id"]) geofile.save_raster_projection( layer_name, epsg_string_to_proj4(current_app.config["RASTER_PROJECTION_SYSTEM"]), ) def process_layer(type, id, variable=None, time_period=None, pretty_print=False): layer_name = path.make_unique_layer_name( type, id, variable=variable, time_period=time_period ) current_app.logger.info(f"Download geojson <{layer_name}>...") time_started = time.time() data = client.get_geojson(layer_name, pretty_print) if (data is None) or (data["features"] is None) or (len(data["features"]) == 0): return None time_fetched = time.time() current_app.logger.info( f"... fetch done in {int(time_fetched - time_started)} seconds" ) geofile.delete_all_features(layer_name) valid_variables = None raster_file = None if type == path.VECTOR: valid_variables = geofile.save_vector_geojson(layer_name, data) else: current_app.logger.info("... 
save geometries") geofile.save_raster_geometries(layer_name, data) # Don't download raster files if we have directly access to them if current_app.config["RASTER_CACHE_DIR"] is None: for feature in data["features"]: feature_id = feature["id"] current_app.logger.info(f"... download raster file <{feature_id}>") raster_content = client.get_raster_file(id, feature_id) if raster_content is not None: geofile.save_raster_file(layer_name, feature_id, raster_content) # Retrieve the name of one raster file, in case we need to retrieve infos from it if len(data["features"]) > 0: storage_instance = storage.create_for_layer_type(type) raster_file = storage_instance.get_file_path( layer_name, data["features"][0]["id"] ) time_saved = time.time() current_app.logger.info( f"... save done in {int(time_saved - time_fetched)} seconds." ) return valid_variables, raster_file def process_area(id): layer_name = path.make_unique_layer_name(path.AREA, id) current_app.logger.info(f"Download area <{id}>...") time_started = time.time() data = client.get_area(id) time_fetched = time.time() current_app.logger.info( f"... fetch done in {int(time_fetched - time_started)} seconds" ) geofile.save_vector_geojson(layer_name, data) time_saved = time.time() current_app.logger.info( f"... save done in {int(time_saved - time_fetched)} seconds." )
import json from pathlib import ( Path, ) from typing import ( TYPE_CHECKING, Any, Dict, Generator, Iterable, List, Optional, Tuple, Type, Union, cast, ) from eth_typing import ( URI, Address, ContractName, Manifest, ) from eth_utils import ( to_canonical_address, to_dict, to_text, to_tuple, ) from ethpm._utils.cache import ( cached_property, ) from ethpm._utils.contract import ( generate_contract_factory_kwargs, ) from ethpm._utils.deployments import ( get_linked_deployments, normalize_linked_references, validate_deployments_tx_receipt, validate_linked_references, ) from ethpm.contract import ( LinkableContract, ) from ethpm.dependencies import ( Dependencies, ) from ethpm.deployments import ( DeploymentData, Deployments, ) from ethpm.exceptions import ( BytecodeLinkingError, EthPMValidationError, FailureToFetchIPFSAssetsError, InsufficientAssetsError, PyEthPMError, ) from ethpm.uri import ( resolve_uri_contents, ) from ethpm.validation.manifest import ( check_for_deployments, validate_build_dependencies_are_present, validate_manifest_against_schema, validate_manifest_deployments, validate_raw_manifest_format, ) from ethpm.validation.misc import ( validate_w3_instance, ) from ethpm.validation.package import ( validate_build_dependency, validate_contract_name, validate_minimal_contract_factory_data, ) from ethpm.validation.uri import ( validate_single_matching_uri, ) from web3._utils.validation import ( validate_address, ) from web3.eth import ( Contract, ) if TYPE_CHECKING: from web3 import Web3 # noqa: F401 class Package(object): def __init__( self, manifest: Dict[str, Any], w3: "Web3", uri: Optional[str] = None ) -> None: """ A package should be created using one of the available classmethods and a valid w3 instance. """ if not isinstance(manifest, dict): raise TypeError( "Package object must be initialized with a dictionary. " f"Got {type(manifest)}" ) if "manifest" not in manifest or manifest["manifest"] != "ethpm/3": raise EthPMValidationError( "Py-Ethpm currently only supports v3 ethpm manifests. " "Please use the CLI to update or re-generate a v3 manifest. " ) validate_manifest_against_schema(manifest) validate_manifest_deployments(manifest) validate_w3_instance(w3) self.w3 = w3 self.w3.eth.defaultContractFactory = cast(Type[Contract], LinkableContract) self.manifest = manifest self._uri = uri def update_w3(self, w3: "Web3") -> "Package": """ Returns a new instance of `Package` containing the same manifest, but connected to a different web3 instance. .. doctest:: >>> new_w3 = Web3(Web3.EthereumTesterProvider()) >>> NewPackage = OwnedPackage.update_w3(new_w3) >>> assert NewPackage.w3 == new_w3 >>> assert OwnedPackage.manifest == NewPackage.manifest """ validate_w3_instance(w3) return Package(self.manifest, w3, self.uri) def __repr__(self) -> str: """ String readable representation of the Package. .. doctest:: >>> OwnedPackage.__repr__() '<Package owned==1.0.0>' """ name = self.name version = self.version return f"<Package {name}=={version}>" @property def name(self) -> str: """ The name of this ``Package``. .. doctest:: >>> OwnedPackage.name 'owned' """ return self.manifest["name"] @property def version(self) -> str: """ The package version of a ``Package``. .. doctest:: >>> OwnedPackage.version '1.0.0' """ return self.manifest["version"] @property def manifest_version(self) -> str: """ The manifest version of a ``Package``. .. 
doctest:: >>> OwnedPackage.manifest_version 'ethpm/3' """ return self.manifest["manifest"] @property def uri(self) -> Optional[str]: """ The uri (local file_path / content-addressed URI) of a ``Package``'s manifest. """ return self._uri @property def contract_types(self) -> List[str]: """ All contract types included in this package. """ if 'contractTypes' in self.manifest: return sorted(self.manifest['contractTypes'].keys()) else: raise ValueError(f"No contract types found in manifest; {self.__repr__()}.") @classmethod def from_file(cls, file_path: Path, w3: "Web3") -> "Package": """ Returns a ``Package`` instantiated by a manifest located at the provided Path. ``file_path`` arg must be a ``pathlib.Path`` instance. A valid ``Web3`` instance is required to instantiate a ``Package``. """ if isinstance(file_path, Path): raw_manifest = file_path.read_text() validate_raw_manifest_format(raw_manifest) manifest = json.loads(raw_manifest) else: raise TypeError( "The Package.from_file method expects a pathlib.Path instance. " f"Got {type(file_path)} instead." ) return cls(manifest, w3, file_path.as_uri()) @classmethod def from_uri(cls, uri: URI, w3: "Web3") -> "Package": """ Returns a Package object instantiated by a manifest located at a content-addressed URI. A valid ``Web3`` instance is also required. URI schemes supported: - IPFS: `ipfs://Qm...` - HTTP: `https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha` - Registry: `erc1319://registry.eth:1/greeter?version=1.0.0` .. code:: python OwnedPackage = Package.from_uri('ipfs://QmbeVyFLSuEUxiXKwSsEjef7icpdTdA4kGG9BcrJXKNKUW', w3) # noqa: E501 """ contents = to_text(resolve_uri_contents(uri)) validate_raw_manifest_format(contents) manifest = json.loads(contents) return cls(manifest, w3, uri) # # Contracts # def get_contract_factory(self, name: ContractName) -> LinkableContract: """ Return the contract factory for a given contract type, generated from the data available in ``Package.manifest``. Contract factories are accessible from the package class. .. code:: python Owned = OwnedPackage.get_contract_factory('owned') In cases where a contract uses a library, the contract factory will have unlinked bytecode. The ``ethpm`` package ships with its own subclass of ``web3.contract.Contract``, ``ethpm.contract.LinkableContract`` with a few extra methods and properties related to bytecode linking. .. code:: python >>> math = owned_package.contract_factories.math >>> math.needs_bytecode_linking True >>> linked_math = math.link_bytecode({'MathLib': '0x1234...'}) >>> linked_math.needs_bytecode_linking False """ validate_contract_name(name) if "contractTypes" not in self.manifest: raise InsufficientAssetsError( "This package does not contain any contract type data." ) try: contract_data = self.manifest["contractTypes"][name] except KeyError: raise InsufficientAssetsError( "This package does not contain any package data to generate " f"a contract factory for contract type: {name}. Available contract types include: " f"{self.contract_types}." ) validate_minimal_contract_factory_data(contract_data) contract_kwargs = generate_contract_factory_kwargs(contract_data) contract_factory = self.w3.eth.contract(**contract_kwargs) return contract_factory def get_contract_instance(self, name: ContractName, address: Address) -> Contract: """ Will return a ``Web3.contract`` instance generated from the contract type data available in ``Package.manifest`` and the provided ``address``. 
The provided ``address`` must be valid on the connected chain available through ``Package.w3``. """ validate_address(address) validate_contract_name(name) try: self.manifest["contractTypes"][name]["abi"] except KeyError: raise InsufficientAssetsError( "Package does not have the ABI required to generate a contract instance " f"for contract: {name} at address: {address}." ) contract_kwargs = generate_contract_factory_kwargs( self.manifest["contractTypes"][name] ) contract_instance = self.w3.eth.contract( address=address, **contract_kwargs ) return contract_instance # # Build Dependencies # @cached_property def build_dependencies(self) -> "Dependencies": """ Return `Dependencies` instance containing the build dependencies available on this Package. The ``Package`` class should provide access to the full dependency tree. .. code:: python >>> owned_package.build_dependencies['zeppelin'] <ZeppelinPackage> """ validate_build_dependencies_are_present(self.manifest) dependencies = self.manifest["buildDependencies"] dependency_packages = {} for name, uri in dependencies.items(): try: validate_build_dependency(name, uri) dependency_package = Package.from_uri(uri, self.w3) except PyEthPMError as e: raise FailureToFetchIPFSAssetsError( f"Failed to retrieve build dependency: {name} from URI: {uri}.\n" f"Got error: {e}." ) else: dependency_packages[name] = dependency_package return Dependencies(dependency_packages) # # Deployments # @cached_property def deployments(self) -> Union["Deployments", Dict[None, None]]: """ Returns a ``Deployments`` object containing all the deployment data and contract instances of a ``Package``'s `contract_types`. Automatically filters deployments to only expose those available on the current ``Package.w3`` instance. .. code:: python package.deployments.get_instance("ContractType") """ if not check_for_deployments(self.manifest): return {} all_blockchain_uris = self.manifest["deployments"].keys() matching_uri = validate_single_matching_uri(all_blockchain_uris, self.w3) deployments = self.manifest["deployments"][matching_uri] all_contract_instances = self._get_all_contract_instances(deployments) validate_deployments_tx_receipt(deployments, self.w3, allow_missing_data=True) linked_deployments = get_linked_deployments(deployments) if linked_deployments: for deployment_data in linked_deployments.values(): on_chain_bytecode = self.w3.eth.get_code( deployment_data["address"] ) unresolved_linked_refs = normalize_linked_references( deployment_data["runtimeBytecode"]["linkDependencies"] ) resolved_linked_refs = tuple( self._resolve_linked_references(link_ref, deployments) for link_ref in unresolved_linked_refs ) for linked_ref in resolved_linked_refs: validate_linked_references(linked_ref, on_chain_bytecode) return Deployments(deployments, all_contract_instances) @to_dict def _get_all_contract_instances( self, deployments: Dict[str, DeploymentData] ) -> Iterable[Tuple[str, Contract]]: for deployment_name, deployment_data in deployments.items(): if deployment_data['contractType'] not in self.contract_types: raise EthPMValidationError( f"Contract type: {deployment_data["contractType"]} for alias: " f"{deployment_name} not found. Available contract types include: " f"{self.contract_types}." 
) contract_instance = self.get_contract_instance( ContractName(deployment_data['contractType']), deployment_data['address'], ) yield deployment_name, contract_instance @to_tuple def _resolve_linked_references( self, link_ref: Tuple[int, str, str], deployments: Dict[str, Any] ) -> Generator[Tuple[int, bytes], None, None]: # No nested deployment: i.e. 'Owned' offset, link_type, value = link_ref if link_type == "literal": yield offset, to_canonical_address(value) elif value in deployments: yield offset, to_canonical_address(deployments[value]["address"]) # No nested deployment, but invalid ref elif ":" not in value: raise BytecodeLinkingError( f"Contract instance reference: {value} not found in package's deployment data." ) # Expects child pkg in build_dependencies elif value.split(":")[0] not in self.build_dependencies: raise BytecodeLinkingError( f"Expected build dependency: {value.split(":")[0]} not found " "in package's build dependencies." ) # Find and return resolved, nested ref else: unresolved_linked_ref = value.split(":", 1)[-1] build_dependency = self.build_dependencies[value.split(":")[0]] yield build_dependency._resolve_link_dependencies(unresolved_linked_ref) def format_manifest(manifest: Manifest, *, prettify: bool = None) -> str: if prettify: return json.dumps(manifest, sort_keys=True, indent=4) return json.dumps(manifest, sort_keys=True, separators=(",", ":"))
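Tying the classmethods and factories above together, a hedged end-to-end sketch: load a v3 manifest from disk and build a contract factory. The manifest path is an assumption, and Web3.EthereumTesterProvider requires the eth-tester extra to be installed:

from pathlib import Path

from web3 import Web3

w3 = Web3(Web3.EthereumTesterProvider())

# "owned.json" is a hypothetical ethpm/3 manifest on disk.
OwnedPackage = Package.from_file(Path("owned.json"), w3)

print(OwnedPackage)                 # e.g. <Package owned==1.0.0>
print(OwnedPackage.contract_types)  # e.g. ['owned']

Owned = OwnedPackage.get_contract_factory("owned")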
import json from pathlib import ( Path, ) from typing import ( TYPE_CHECKING, Any, Dict, Generator, Iterable, List, Optional, Tuple, Type, Union, cast, ) from eth_typing import ( URI, Address, ContractName, Manifest, ) from eth_utils import ( to_canonical_address, to_dict, to_text, to_tuple, ) from ethpm._utils.cache import ( cached_property, ) from ethpm._utils.contract import ( generate_contract_factory_kwargs, ) from ethpm._utils.deployments import ( get_linked_deployments, normalize_linked_references, validate_deployments_tx_receipt, validate_linked_references, ) from ethpm.contract import ( LinkableContract, ) from ethpm.dependencies import ( Dependencies, ) from ethpm.deployments import ( DeploymentData, Deployments, ) from ethpm.exceptions import ( BytecodeLinkingError, EthPMValidationError, FailureToFetchIPFSAssetsError, InsufficientAssetsError, PyEthPMError, ) from ethpm.uri import ( resolve_uri_contents, ) from ethpm.validation.manifest import ( check_for_deployments, validate_build_dependencies_are_present, validate_manifest_against_schema, validate_manifest_deployments, validate_raw_manifest_format, ) from ethpm.validation.misc import ( validate_w3_instance, ) from ethpm.validation.package import ( validate_build_dependency, validate_contract_name, validate_minimal_contract_factory_data, ) from ethpm.validation.uri import ( validate_single_matching_uri, ) from web3._utils.validation import ( validate_address, ) from web3.eth import ( Contract, ) if TYPE_CHECKING: from web3 import Web3 # noqa: F401 class Package(object): def __init__( self, manifest: Dict[str, Any], w3: "Web3", uri: Optional[str] = None ) -> None: """ A package should be created using one of the available classmethods and a valid w3 instance. """ if not isinstance(manifest, dict): raise TypeError( "Package object must be initialized with a dictionary. " f"Got {type(manifest)}" ) if "manifest" not in manifest or manifest["manifest"] != "ethpm/3": raise EthPMValidationError( "Py-Ethpm currently only supports v3 ethpm manifests. " "Please use the CLI to update or re-generate a v3 manifest. " ) validate_manifest_against_schema(manifest) validate_manifest_deployments(manifest) validate_w3_instance(w3) self.w3 = w3 self.w3.eth.defaultContractFactory = cast(Type[Contract], LinkableContract) self.manifest = manifest self._uri = uri def update_w3(self, w3: "Web3") -> "Package": """ Returns a new instance of `Package` containing the same manifest, but connected to a different web3 instance. .. doctest:: >>> new_w3 = Web3(Web3.EthereumTesterProvider()) >>> NewPackage = OwnedPackage.update_w3(new_w3) >>> assert NewPackage.w3 == new_w3 >>> assert OwnedPackage.manifest == NewPackage.manifest """ validate_w3_instance(w3) return Package(self.manifest, w3, self.uri) def __repr__(self) -> str: """ String readable representation of the Package. .. doctest:: >>> OwnedPackage.__repr__() '<Package owned==1.0.0>' """ name = self.name version = self.version return f"<Package {name}=={version}>" @property def name(self) -> str: """ The name of this ``Package``. .. doctest:: >>> OwnedPackage.name 'owned' """ return self.manifest["name"] @property def version(self) -> str: """ The package version of a ``Package``. .. doctest:: >>> OwnedPackage.version '1.0.0' """ return self.manifest["version"] @property def manifest_version(self) -> str: """ The manifest version of a ``Package``. .. 
doctest:: >>> OwnedPackage.manifest_version 'ethpm/3' """ return self.manifest["manifest"] @property def uri(self) -> Optional[str]: """ The uri (local file_path / content-addressed URI) of a ``Package``'s manifest. """ return self._uri @property def contract_types(self) -> List[str]: """ All contract types included in this package. """ if 'contractTypes' in self.manifest: return sorted(self.manifest['contractTypes'].keys()) else: raise ValueError(f"No contract types found in manifest; {self.__repr__()}.") @classmethod def from_file(cls, file_path: Path, w3: "Web3") -> "Package": """ Returns a ``Package`` instantiated by a manifest located at the provided Path. ``file_path`` arg must be a ``pathlib.Path`` instance. A valid ``Web3`` instance is required to instantiate a ``Package``. """ if isinstance(file_path, Path): raw_manifest = file_path.read_text() validate_raw_manifest_format(raw_manifest) manifest = json.loads(raw_manifest) else: raise TypeError( "The Package.from_file method expects a pathlib.Path instance. " f"Got {type(file_path)} instead." ) return cls(manifest, w3, file_path.as_uri()) @classmethod def from_uri(cls, uri: URI, w3: "Web3") -> "Package": """ Returns a Package object instantiated by a manifest located at a content-addressed URI. A valid ``Web3`` instance is also required. URI schemes supported: - IPFS: `ipfs://Qm...` - HTTP: `https://api.github.com/repos/:owner/:repo/git/blobs/:file_sha` - Registry: `erc1319://registry.eth:1/greeter?version=1.0.0` .. code:: python OwnedPackage = Package.from_uri('ipfs://QmbeVyFLSuEUxiXKwSsEjef7icpdTdA4kGG9BcrJXKNKUW', w3) # noqa: E501 """ contents = to_text(resolve_uri_contents(uri)) validate_raw_manifest_format(contents) manifest = json.loads(contents) return cls(manifest, w3, uri) # # Contracts # def get_contract_factory(self, name: ContractName) -> LinkableContract: """ Return the contract factory for a given contract type, generated from the data available in ``Package.manifest``. Contract factories are accessible from the package class. .. code:: python Owned = OwnedPackage.get_contract_factory('owned') In cases where a contract uses a library, the contract factory will have unlinked bytecode. The ``ethpm`` package ships with its own subclass of ``web3.contract.Contract``, ``ethpm.contract.LinkableContract`` with a few extra methods and properties related to bytecode linking. .. code:: python >>> math = owned_package.contract_factories.math >>> math.needs_bytecode_linking True >>> linked_math = math.link_bytecode({'MathLib': '0x1234...'}) >>> linked_math.needs_bytecode_linking False """ validate_contract_name(name) if "contractTypes" not in self.manifest: raise InsufficientAssetsError( "This package does not contain any contract type data." ) try: contract_data = self.manifest["contractTypes"][name] except KeyError: raise InsufficientAssetsError( "This package does not contain any package data to generate " f"a contract factory for contract type: {name}. Available contract types include: " f"{self.contract_types}." ) validate_minimal_contract_factory_data(contract_data) contract_kwargs = generate_contract_factory_kwargs(contract_data) contract_factory = self.w3.eth.contract(**contract_kwargs) return contract_factory def get_contract_instance(self, name: ContractName, address: Address) -> Contract: """ Will return a ``Web3.contract`` instance generated from the contract type data available in ``Package.manifest`` and the provided ``address``. 
The provided ``address`` must be valid on the connected chain available through ``Package.w3``. """ validate_address(address) validate_contract_name(name) try: self.manifest["contractTypes"][name]["abi"] except KeyError: raise InsufficientAssetsError( "Package does not have the ABI required to generate a contract instance " f"for contract: {name} at address: {address}." ) contract_kwargs = generate_contract_factory_kwargs( self.manifest["contractTypes"][name] ) contract_instance = self.w3.eth.contract( address=address, **contract_kwargs ) return contract_instance # # Build Dependencies # @cached_property def build_dependencies(self) -> "Dependencies": """ Return `Dependencies` instance containing the build dependencies available on this Package. The ``Package`` class should provide access to the full dependency tree. .. code:: python >>> owned_package.build_dependencies['zeppelin'] <ZeppelinPackage> """ validate_build_dependencies_are_present(self.manifest) dependencies = self.manifest["buildDependencies"] dependency_packages = {} for name, uri in dependencies.items(): try: validate_build_dependency(name, uri) dependency_package = Package.from_uri(uri, self.w3) except PyEthPMError as e: raise FailureToFetchIPFSAssetsError( f"Failed to retrieve build dependency: {name} from URI: {uri}.\n" f"Got error: {e}." ) else: dependency_packages[name] = dependency_package return Dependencies(dependency_packages) # # Deployments # @cached_property def deployments(self) -> Union["Deployments", Dict[None, None]]: """ Returns a ``Deployments`` object containing all the deployment data and contract instances of a ``Package``'s `contract_types`. Automatically filters deployments to only expose those available on the current ``Package.w3`` instance. .. code:: python package.deployments.get_instance("ContractType") """ if not check_for_deployments(self.manifest): return {} all_blockchain_uris = self.manifest["deployments"].keys() matching_uri = validate_single_matching_uri(all_blockchain_uris, self.w3) deployments = self.manifest["deployments"][matching_uri] all_contract_instances = self._get_all_contract_instances(deployments) validate_deployments_tx_receipt(deployments, self.w3, allow_missing_data=True) linked_deployments = get_linked_deployments(deployments) if linked_deployments: for deployment_data in linked_deployments.values(): on_chain_bytecode = self.w3.eth.get_code( deployment_data["address"] ) unresolved_linked_refs = normalize_linked_references( deployment_data["runtimeBytecode"]["linkDependencies"] ) resolved_linked_refs = tuple( self._resolve_linked_references(link_ref, deployments) for link_ref in unresolved_linked_refs ) for linked_ref in resolved_linked_refs: validate_linked_references(linked_ref, on_chain_bytecode) return Deployments(deployments, all_contract_instances) @to_dict def _get_all_contract_instances( self, deployments: Dict[str, DeploymentData] ) -> Iterable[Tuple[str, Contract]]: for deployment_name, deployment_data in deployments.items(): if deployment_data['contractType'] not in self.contract_types: raise EthPMValidationError( f"Contract type: {deployment_data['contractType']} for alias: " f"{deployment_name} not found. Available contract types include: " f"{self.contract_types}." 
) contract_instance = self.get_contract_instance( ContractName(deployment_data['contractType']), deployment_data['address'], ) yield deployment_name, contract_instance @to_tuple def _resolve_linked_references( self, link_ref: Tuple[int, str, str], deployments: Dict[str, Any] ) -> Generator[Tuple[int, bytes], None, None]: # No nested deployment: i.e. 'Owned' offset, link_type, value = link_ref if link_type == "literal": yield offset, to_canonical_address(value) elif value in deployments: yield offset, to_canonical_address(deployments[value]["address"]) # No nested deployment, but invalid ref elif ":" not in value: raise BytecodeLinkingError( f"Contract instance reference: {value} not found in package's deployment data." ) # Expects child pkg in build_dependencies elif value.split(":")[0] not in self.build_dependencies: raise BytecodeLinkingError( f"Expected build dependency: {value.split(':')[0]} not found " "in package's build dependencies." ) # Find and return resolved, nested ref else: unresolved_linked_ref = value.split(":", 1)[-1] build_dependency = self.build_dependencies[value.split(":")[0]] yield build_dependency._resolve_link_dependencies(unresolved_linked_ref) def format_manifest(manifest: Manifest, *, prettify: bool = None) -> str: if prettify: return json.dumps(manifest, sort_keys=True, indent=4) return json.dumps(manifest, sort_keys=True, separators=(",", ":"))
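format_manifest switches between a minified manifest (the compact on-disk form) and an indented one. A quick illustration with a stub dict, which is not a valid manifest but shows both output shapes:

stub = {"version": "1.0.0", "name": "owned", "manifest": "ethpm/3"}

print(format_manifest(stub))
# {"manifest":"ethpm/3","name":"owned","version":"1.0.0"}

print(format_manifest(stub, prettify=True))
# same keys, sorted and indented by four spaces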
# SPDX-License-Identifier: MIT # # Copyright (c) 2021 The Anvil Extras project team members listed at # https://github.com/anvilistas/anvil-extras/graphs/contributors # # This software is published at https://github.com/anvilistas/anvil-extras import random import anvil.js from anvil import Component as _Component from anvil import app as _app from anvil.js import get_dom_node as _get_dom_node from anvil.js.window import Promise as _Promise from anvil.js.window import document as _document __version__ = "2.1.0" _characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" class HTMLInjector: _injected_css = set() def css(self, css): """inject some custom css""" hashed = hash(css) if hashed in self._injected_css: return sheet = self._create_tag("style") sheet.innerHTML = css self._inject(sheet, head=False) self._injected_css.add(hashed) def cdn(self, cdn_url, **attrs): """inject a js/css cdn file""" if cdn_url.endswith("js"): tag = self._create_tag("script", src=cdn_url, **attrs) elif cdn_url.endswith("css"): tag = self._create_tag("link", href=cdn_url, rel="stylesheet", **attrs) else: raise ValueError("Unknown CDN type, expected a css or js file") self._inject(tag) self._wait_for_load(tag) def script(self, js): """inject some javascript code inside a script tag""" s = self._create_tag("script") s.textContent = js self._inject(s) def _create_tag(self, tag_name, **attrs): tag = _document.createElement(tag_name) for attr, value in attrs.items(): tag.setAttribute(attr, value) return tag def _inject(self, tag, head=True): if head: _document.head.appendChild(tag) else: _document.body.appendChild(tag) def _wait_for_load(self, tag): if not tag.get("src"): return def do_wait(res, rej): tag.onload = res tag.onerror = rej p = _Promise(do_wait) anvil.js.await_promise(p) _html_injector = HTMLInjector() def _get_dom_node_id(component): node = _get_dom_node(component) if not node.id: node.id = "".join([random.choice(_characters) for _ in range(8)]) return node.id def _spacing_property(a_b): def getter(self): return getattr(self, "_spacing_" + a_b) def setter(self, value): self._dom_node.classList.remove( f"anvil-spacing-{a_b}-{getattr(self, "_spacing_" + a_b, "")}" ) self._dom_node.classList.add(f"anvil-spacing-{a_b}-{value}") setattr(self, "_spacing_" + a_b, value) return property(getter, setter, None, a_b) _primary = _app.theme_colors.get("Primary 500", "#2196F3") def _get_color(value): if not value: return _primary elif value.startswith("theme:"): return _app.theme_colors.get(value.replace("theme:", ""), _primary) else: return value def _get_rgb(value): value = _get_color(value) if value.startswith("#"): value = value[1:] value = ",".join(str(int(value[i : i + 2], 16)) for i in (0, 2, 4)) elif value.startswith("rgb") and value.endswith(")"): value = value[value.find("(") + 1 : -1] # keep only the comma-separated channels inside rgb(...) else: raise ValueError( f"expected a hex value, theme color or rgb value, not {value}" ) return value def _walker(children): for child in children: yield child get_children = getattr(child, "get_components", None) if get_children is not None: yield from _walker(get_children()) def walk(component_or_components): """yields the component(s) passed in and all their children""" if isinstance(component_or_components, _Component): component_or_components = [component_or_components] yield from _walker(component_or_components)
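The hex branch of _get_rgb above converts a six-digit hex color into decimal channel values by slicing two characters per channel. The arithmetic in isolation, independent of the Anvil runtime, using the module's default primary color:

value = "2196F3"  # "#2196F3" with the leading "#" stripped
rgb = ",".join(str(int(value[i : i + 2], 16)) for i in (0, 2, 4))
print(rgb)  # 33,150,243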
# SPDX-License-Identifier: MIT # # Copyright (c) 2021 The Anvil Extras project team members listed at # https://github.com/anvilistas/anvil-extras/graphs/contributors # # This software is published at https://github.com/anvilistas/anvil-extras import random import anvil.js from anvil import Component as _Component from anvil import app as _app from anvil.js import get_dom_node as _get_dom_node from anvil.js.window import Promise as _Promise from anvil.js.window import document as _document __version__ = "2.1.0" _characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" class HTMLInjector: _injected_css = set() def css(self, css): """inject some custom css""" hashed = hash(css) if hashed in self._injected_css: return sheet = self._create_tag("style") sheet.innerHTML = css self._inject(sheet, head=False) self._injected_css.add(hashed) def cdn(self, cdn_url, **attrs): """inject a js/css cdn file""" if cdn_url.endswith("js"): tag = self._create_tag("script", src=cdn_url, **attrs) elif cdn_url.endswith("css"): tag = self._create_tag("link", href=cdn_url, rel="stylesheet", **attrs) else: raise ValueError("Unknown CDN type, expected a css or js file") self._inject(tag) self._wait_for_load(tag) def script(self, js): """inject some javascript code inside a script tag""" s = self._create_tag("script") s.textContent = js self._inject(s) def _create_tag(self, tag_name, **attrs): tag = _document.createElement(tag_name) for attr, value in attrs.items(): tag.setAttribute(attr, value) return tag def _inject(self, tag, head=True): if head: _document.head.appendChild(tag) else: _document.body.appendChild(tag) def _wait_for_load(self, tag): if not tag.get("src"): return def do_wait(res, rej): tag.onload = res tag.onerror = rej p = _Promise(do_wait) anvil.js.await_promise(p) _html_injector = HTMLInjector() def _get_dom_node_id(component): node = _get_dom_node(component) if not node.id: node.id = "".join([random.choice(_characters) for _ in range(8)]) return node.id def _spacing_property(a_b): def getter(self): return getattr(self, "_spacing_" + a_b) def setter(self, value): self._dom_node.classList.remove( f"anvil-spacing-{a_b}-{getattr(self, '_spacing_' + a_b, '')}" ) self._dom_node.classList.add(f"anvil-spacing-{a_b}-{value}") setattr(self, "_spacing_" + a_b, value) return property(getter, setter, None, a_b) _primary = _app.theme_colors.get("Primary 500", "#2196F3") def _get_color(value): if not value: return _primary elif value.startswith("theme:"): return _app.theme_colors.get(value.replace("theme:", ""), _primary) else: return value def _get_rgb(value): value = _get_color(value) if value.startswith("#"): value = value[1:] value = ",".join(str(int(value[i : i + 2], 16)) for i in (0, 2, 4)) elif value.startswith("rgb") and value.endswith(")"): value = value[value.find("(") + 1 : -1] # keep only the comma-separated channels inside rgb(...) else: raise ValueError( f"expected a hex value, theme color or rgb value, not {value}" ) return value def _walker(children): for child in children: yield child get_children = getattr(child, "get_components", None) if get_children is not None: yield from _walker(get_children()) def walk(component_or_components): """yields the component(s) passed in and all their children""" if isinstance(component_or_components, _Component): component_or_components = [component_or_components] yield from _walker(component_or_components)
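For context, a brief usage sketch for the module-level _html_injector; the CDN URL is a made-up placeholder, and these calls only make sense inside a running Anvil client app:

# Identical css() calls are de-duplicated via the hash check in HTMLInjector.css.
_html_injector.css(".my-badge { border-radius: 4px; }")

# cdn() blocks until the injected tag's onload fires (see _wait_for_load above).
_html_injector.cdn("https://cdn.example.com/some-lib.min.js")  # hypothetical URL

_html_injector.script("console.log('injected');")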
"""Command line interface including input and output.""" import argparse import sys from seedrecover.wordlist import Wordlist from seedrecover.order import iterate from seedrecover.keyderiv import seed_to_stakeaddress, ChecksumError from seedrecover.stakecheck import StakeAddresses from seedrecover.bfstakecheck import BlockFrost, InactiveError from typing import List, Optional def parse_args(prog: Optional[str] = None) -> argparse.Namespace: """Parse the command line arguments.""" description = "recover BIP-39 mnemonic seed phrases" if prog: parser = argparse.ArgumentParser(prog=prog, description=description) else: parser = argparse.ArgumentParser(description=description) parser.add_argument("seed", nargs="*", help="known words of seed phrase", metavar="WORD") parser.add_argument("-w", "--wordlist", help="wordlist to use (default: english)", metavar="FILE") parser.add_argument("-s", "--similar", type=int, default=0, help="try similar words up to edit distance", metavar="EDIT DISTANCE") parser.add_argument("-o", "--order", default=False, action="store_true", help="try different orders of seed words") parser.add_argument("-l", "--length", type=int, help="length of seed phrase", metavar="LENGTH") parser.add_argument("-m", "--missing", type=int, nargs="+", default=[], help="missing word positions", metavar="POSITION") parser.add_argument("-a", "--address", nargs="+", default=[], help="check for stake addresses", metavar="ADDRESS") parser.add_argument("-b", "--blockfrost", help="check on BlockFrost", metavar="API KEY") return parser.parse_args() def get_seed(wordlist: Wordlist, known: List[str], similar: int) -> List[List[str]]: """Determine possible seed words from given known words.""" seed = [] for word in known: if not wordlist.contains(word): print(f"'{word}' not in wordlist!", file=sys.stderr) words = wordlist.get_words(word, similar) print(f"{word} => {", ".join(words)}", file=sys.stderr) seed.append(words) return seed def get_length(length: Optional[int], known: int) -> int: """Determine length of seed phrase to search for.""" if not length: print("Length not set. 
Using smallest length for given phrase.", file=sys.stderr) length = known if known % 3: length += 3 - known % 3 if length % 3 != 0: print("Length is not a multiple of 3!", file=sys.stderr) exit(1) if known > length: print("More known words than length given!", file=sys.stderr) exit(1) print(f"{length - known} of {length} words missing.", file=sys.stderr) return length def get_missing_positions(given: List[int], length: int, missing: int) -> List[int]: """Check or determine positions for missing words.""" missing_positions = [i-1 for i in given] if not missing_positions: missing_positions = list(range(length)) if len(missing_positions) < missing: print(f"Only {len(missing_positions)} positions given" " for missing words!", file=sys.stderr) exit(1) return missing_positions def main(prog: Optional[str] = None) -> None: """Execute the main control flow of the seedrecover script.""" args = parse_args(prog) wordlist = Wordlist(args.wordlist) seed = get_seed(wordlist, args.seed, args.similar) length = get_length(args.length, len(seed)) missing_positions = get_missing_positions(args.missing, length, length - len(seed)) sc = None if args.address: sc = StakeAddresses(args.address) bf = None if args.blockfrost: try: bf = BlockFrost(args.blockfrost) except InactiveError as e: print(e, file=sys.stderr) bf = None already_checked = set() total_seed_phrases = 0 checksum_seed_phrases = 0 norepeat_seed_phrases = 0 for seed_phrase in iterate(seed, args.order, wordlist, length, missing_positions): okay = True total_seed_phrases += 1 try: stake_address = seed_to_stakeaddress(seed_phrase, wordlist) except ChecksumError: okay = False if okay: checksum_seed_phrases += 1 if stake_address in already_checked: okay = False else: already_checked.add(stake_address) norepeat_seed_phrases += 1 print(f"Seed phrases checked: {total_seed_phrases:10_} total, " f"{checksum_seed_phrases:10_} fulfilled checksum, " f"{norepeat_seed_phrases:10_} without repetitions", file=sys.stderr, end="\r") if not okay: continue searched = False active = False verbose = True if sc: if sc.check_stake_address(stake_address): searched = True verbose = False if bf: try: if bf.check_stake_address(stake_address): active = True verbose = False except InactiveError as e: print(e, file=sys.stderr) bf = None if searched or active or verbose: print(file=sys.stderr) if searched and active: print("Searched and active stake address found:") elif searched: print("Searched stake address found:") elif active: print("Active stake address found:") print(f"{stake_address}: {" ".join(seed_phrase)}") print(file=sys.stderr)
"""Command line interface including input and output.""" import argparse import sys from seedrecover.wordlist import Wordlist from seedrecover.order import iterate from seedrecover.keyderiv import seed_to_stakeaddress, ChecksumError from seedrecover.stakecheck import StakeAddresses from seedrecover.bfstakecheck import BlockFrost, InactiveError from typing import List, Optional def parse_args(prog: Optional[str] = None) -> argparse.Namespace: """Parse the command line arguments.""" description = "recover BIP-39 mnemonic seed phrases" if prog: parser = argparse.ArgumentParser(prog=prog, description=description) else: parser = argparse.ArgumentParser(description=description) parser.add_argument("seed", nargs="*", help="known words of seed phrase", metavar="WORD") parser.add_argument("-w", "--wordlist", help="wordlist to use (default: english)", metavar="FILE") parser.add_argument("-s", "--similar", type=int, default=0, help="try similar words up to edit distance", metavar="EDIT DISTANCE") parser.add_argument("-o", "--order", default=False, action="store_true", help="try different orders of seed words") parser.add_argument("-l", "--length", type=int, help="length of seed phrase", metavar="LENGTH") parser.add_argument("-m", "--missing", type=int, nargs="+", default=[], help="missing word positions", metavar="POSITION") parser.add_argument("-a", "--address", nargs="+", default=[], help="check for stake addresses", metavar="ADDRESS") parser.add_argument("-b", "--blockfrost", help="check on BlockFrost", metavar="API KEY") return parser.parse_args() def get_seed(wordlist: Wordlist, known: List[str], similar: int) -> List[List[str]]: """Determine possible seed words from given known words.""" seed = [] for word in known: if not wordlist.contains(word): print(f"'{word}' not in wordlist!", file=sys.stderr) words = wordlist.get_words(word, similar) print(f"{word} => {', '.join(words)}", file=sys.stderr) seed.append(words) return seed def get_length(length: Optional[int], known: int) -> int: """Determine length of seed phrase to search for.""" if not length: print("Length not set. 
Using smallest length for given phrase.", file=sys.stderr) length = known if known % 3: length += 3 - known % 3 if length % 3 != 0: print("Length is not a multiple of 3!", file=sys.stderr) exit(1) if known > length: print("More known words than length given!", file=sys.stderr) exit(1) print(f"{length - known} of {length} words missing.", file=sys.stderr) return length def get_missing_positions(given: List[int], length: int, missing: int) -> List[int]: """Check or determine positions for missing words.""" missing_positions = [i-1 for i in given] if not missing_positions: missing_positions = list(range(length)) if len(missing_positions) < missing: print(f"Only {len(missing_positions)} positions given" " for missing words!", file=sys.stderr) exit(1) return missing_positions def main(prog: Optional[str] = None) -> None: """Execute the main control flow of the seedrecover script.""" args = parse_args(prog) wordlist = Wordlist(args.wordlist) seed = get_seed(wordlist, args.seed, args.similar) length = get_length(args.length, len(seed)) missing_positions = get_missing_positions(args.missing, length, length - len(seed)) sc = None if args.address: sc = StakeAddresses(args.address) bf = None if args.blockfrost: try: bf = BlockFrost(args.blockfrost) except InactiveError as e: print(e, file=sys.stderr) bf = None already_checked = set() total_seed_phrases = 0 checksum_seed_phrases = 0 norepeat_seed_phrases = 0 for seed_phrase in iterate(seed, args.order, wordlist, length, missing_positions): okay = True total_seed_phrases += 1 try: stake_address = seed_to_stakeaddress(seed_phrase, wordlist) except ChecksumError: okay = False if okay: checksum_seed_phrases += 1 if stake_address in already_checked: okay = False else: already_checked.add(stake_address) norepeat_seed_phrases += 1 print(f"Seed phrases checked: {total_seed_phrases:10_} total, " f"{checksum_seed_phrases:10_} fulfilled checksum, " f"{norepeat_seed_phrases:10_} without repetitions", file=sys.stderr, end="\r") if not okay: continue searched = False active = False verbose = True if sc: if sc.check_stake_address(stake_address): searched = True verbose = False if bf: try: if bf.check_stake_address(stake_address): active = True verbose = False except InactiveError as e: print(e, file=sys.stderr) bf = None if searched or active or verbose: print(file=sys.stderr) if searched and active: print("Searched and active stake address found:") elif searched: print("Searched stake address found:") elif active: print("Active stake address found:") print(f"{stake_address}: {' '.join(seed_phrase)}") print(file=sys.stderr)
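A worked example of the length rounding performed by get_length above (a standalone sketch mirroring its arithmetic rather than importing the package), followed by a hypothetical invocation; the entry-point name is an assumption:

# 13 known words are rounded up to the next multiple of 3:
known = 13
length = known + ((3 - known % 3) % 3)
assert length == 15  # i.e. search for a 15-word phrase with 2 words missing

# Illustrative command line: recover the 2 missing words at unknown positions,
# also trying candidates within edit distance 1 of every given word:
#   python -m seedrecover -s 1 word1 word2 ... word13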
#!/usr/bin/env python3 # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import argparse import copy import json import logging import os import re import shlex import shutil import subprocess import sys import pathlib _LOG = logging.getLogger(__name__) THIS_DIR = os.path.realpath(os.path.dirname(__file__) or ".") # List of vagrant providers supported by this tool ALL_PROVIDERS = ( "parallels", "virtualbox", "vmware_desktop", ) # List of supported electronics platforms. Each must correspond # to a sub-directory of this directory. ALL_PLATFORMS = ( "arduino", "zephyr", ) # Extra scripts required to execute on provisioning # in [platform]/base-box/base_box_provision.sh EXTRA_SCRIPTS = { "arduino": (), "zephyr": ( "docker/install/ubuntu_init_zephyr_project.sh", "docker/install/ubuntu_install_zephyr_sdk.sh", ), } PACKER_FILE_NAME = "packer.json" # List of identifying strings for microTVM boards for testing. with open(pathlib.Path(THIS_DIR) / ".." / "zephyr" / "template_project" / "boards.json") as f: zephyr_boards = json.load(f) with open(pathlib.Path(THIS_DIR) / ".." 
/ "arduino" / "template_project" / "boards.json") as f: arduino_boards = json.load(f) ALL_MICROTVM_BOARDS = { "arduino": arduino_boards.keys(), "zephyr": zephyr_boards.keys(), } def parse_virtualbox_devices(): output = subprocess.check_output(["VBoxManage", "list", "usbhost"], encoding="utf-8") devices = [] current_dev = {} for line in output.split("\n"): if not line.strip(): if current_dev: if "VendorId" in current_dev and "ProductId" in current_dev: devices.append(current_dev) current_dev = {} continue key, value = line.split(":", 1) value = value.lstrip(" ") current_dev[key] = value if current_dev: devices.append(current_dev) return devices VIRTUALBOX_USB_DEVICE_RE = ( "USBAttachVendorId[0-9]+=0x([0-9a-z]{4})\n" + "USBAttachProductId[0-9]+=0x([0-9a-z]{4})" ) def parse_virtualbox_attached_usb_devices(vm_uuid): output = subprocess.check_output( ["VBoxManage", "showvminfo", "--machinereadable", vm_uuid], encoding="utf-8" ) r = re.compile(VIRTUALBOX_USB_DEVICE_RE, re.MULTILINE) attached_usb_devices = r.findall(output) # List of couples (VendorId, ProductId) for all attached USB devices return attached_usb_devices VIRTUALBOX_VID_PID_RE = re.compile(r"0x([0-9A-Fa-f]{4}).*") def attach_virtualbox(vm_uuid, vid_hex=None, pid_hex=None, serial=None): usb_devices = parse_virtualbox_devices() for dev in usb_devices: m = VIRTUALBOX_VID_PID_RE.match(dev["VendorId"]) if not m: _LOG.warning("Malformed VendorId: %s", dev["VendorId"]) continue dev_vid_hex = m.group(1).lower() m = VIRTUALBOX_VID_PID_RE.match(dev["ProductId"]) if not m: _LOG.warning("Malformed ProductId: %s", dev["ProductId"]) continue dev_pid_hex = m.group(1).lower() if ( vid_hex == dev_vid_hex and pid_hex == dev_pid_hex and (serial is None or serial == dev["SerialNumber"]) ): attached_devices = parse_virtualbox_attached_usb_devices(vm_uuid) for vid, pid in attached_devices: if vid_hex == vid and pid_hex == pid: print(f"USB dev {vid_hex}:{pid_hex} already attached. 
Skipping attach.") return rule_args = [ "VBoxManage", "usbfilter", "add", "0", "--action", "hold", "--name", "test device", "--target", vm_uuid, "--vendorid", vid_hex, "--productid", pid_hex, ] if serial is not None: rule_args.extend(["--serialnumber", serial]) subprocess.check_call(rule_args) subprocess.check_call(["VBoxManage", "controlvm", vm_uuid, "usbattach", dev["UUID"]]) return raise Exception( f"Device with vid={vid_hex}, pid={pid_hex}, serial={serial!r} not found:\n{usb_devices!r}" ) def attach_parallels(uuid, vid_hex=None, pid_hex=None, serial=None): usb_devices = json.loads( subprocess.check_output(["prlsrvctl", "usb", "list", "-j"], encoding="utf-8") ) for dev in usb_devices: _, dev_vid_hex, dev_pid_hex, _, _, dev_serial = dev["System name"].split("|") dev_vid_hex = dev_vid_hex.lower() dev_pid_hex = dev_pid_hex.lower() if ( vid_hex == dev_vid_hex and pid_hex == dev_pid_hex and (serial is None or serial == dev_serial) ): subprocess.check_call(["prlsrvctl", "usb", "set", dev["Name"], uuid]) if "Used-By-Vm-Name" in dev: subprocess.check_call( ["prlctl", "set", dev["Used-By-Vm-Name"], "--device-disconnect", dev["Name"]] ) subprocess.check_call(["prlctl", "set", uuid, "--device-connect", dev["Name"]]) return raise Exception( f"Device with vid={vid_hex}, pid={pid_hex}, serial={serial!r} not found:\n{usb_devices!r}" ) def attach_vmware(uuid, vid_hex=None, pid_hex=None, serial=None): print("NOTE: vmware doesn't seem to support automatic attaching of devices :(") print(f"The VMWare VM UUID is {uuid}") print("Please attach the following usb device using the VMWare GUI:") if vid_hex is not None: print(f" - VID: {vid_hex}") if pid_hex is not None: print(f" - PID: {pid_hex}") if serial is not None: print(f" - Serial: {serial}") if vid_hex is None and pid_hex is None and serial is None: print(" - (no specifications given for USB device)") print() print("Press [Enter] when the USB device is attached") input() ATTACH_USB_DEVICE = { "parallels": attach_parallels, "virtualbox": attach_virtualbox, "vmware_desktop": attach_vmware, } def generate_packer_config(platform, file_path, providers): builders = [] provisioners = [] for provider_name in providers: builders.append( { "name": f"{provider_name}", "type": "vagrant", "box_name": f"microtvm-base-{provider_name}", "output_dir": f"output-packer-{provider_name}", "communicator": "ssh", "source_path": "generic/ubuntu1804", "provider": provider_name, "template": "Vagrantfile.packer-template", } ) repo_root = subprocess.check_output( ["git", "rev-parse", "--show-toplevel"], encoding="utf-8" ).strip() for script in EXTRA_SCRIPTS[platform]: script_path = os.path.join(repo_root, script) filename = os.path.basename(script_path) provisioners.append({"type": "file", "source": script_path, "destination": f"~/{filename}"}) provisioners.append( { "type": "shell", "script": "base_box_setup.sh", } ) provisioners.append( { "type": "shell", "script": "base_box_provision.sh", } ) with open(file_path, "w") as f: json.dump( { "builders": builders, "provisioners": provisioners, }, f, sort_keys=True, indent=2, ) def build_command(args): generate_packer_config( args.platform, os.path.join(THIS_DIR, args.platform, "base-box", PACKER_FILE_NAME), args.provider or ALL_PROVIDERS, ) env = copy.copy(os.environ) packer_args = ["packer", "build"] env["PACKER_LOG"] = "1" env["PACKER_LOG_PATH"] = "packer.log" if args.debug_packer: packer_args += ["-debug"] packer_args += [PACKER_FILE_NAME] subprocess.check_call( packer_args, cwd=os.path.join(THIS_DIR, args.platform, "base-box"), 
env=env ) REQUIRED_TEST_CONFIG_KEYS = { "vid_hex": str, "pid_hex": str, } VM_BOX_RE = re.compile(r'(.*\.vm\.box) = "(.*)"') # Paths, relative to the platform box directory, which will not be copied to release-test dir. SKIP_COPY_PATHS = [".vagrant", "base-box"] def do_build_release_test_vm( release_test_dir, user_box_dir: pathlib.Path, base_box_dir: pathlib.Path, provider_name ): if os.path.exists(release_test_dir): try: subprocess.check_call(["vagrant", "destroy", "-f"], cwd=release_test_dir) except subprocess.CalledProcessError: _LOG.warning("vagrant destroy failed--removing dirtree anyhow", exc_info=True) shutil.rmtree(release_test_dir) for dirpath, _, filenames in os.walk(user_box_dir): rel_path = os.path.relpath(dirpath, user_box_dir) if any( rel_path == scp or rel_path.startswith(f"{scp}{os.path.sep}") for scp in SKIP_COPY_PATHS ): continue dest_dir = os.path.join(release_test_dir, rel_path) os.makedirs(dest_dir) for filename in filenames: shutil.copy2(os.path.join(dirpath, filename), os.path.join(dest_dir, filename)) release_test_vagrantfile = os.path.join(release_test_dir, "Vagrantfile") with open(release_test_vagrantfile) as f: lines = list(f) found_box_line = False with open(release_test_vagrantfile, "w") as f: for line in lines: m = VM_BOX_RE.match(line) if not m: f.write(line) continue box_package = os.path.join( base_box_dir, f"output-packer-{provider_name}", "package.box" ) box_relpath = os.path.relpath(box_package, release_test_dir) f.write(f'{m.group(1)} = "{box_relpath}"\n') found_box_line = True if not found_box_line: _LOG.error( "testing provider %s: couldn't find config.box.vm = line in Vagrantfile; unable to test", provider_name, ) return False # Delete the old box registered with Vagrant, which may lead to a falsely-passing release test. remove_args = ["vagrant", "box", "remove", box_relpath] return_code = subprocess.call(remove_args, cwd=release_test_dir) assert return_code in (0, 1), f'{' '.join(remove_args)} returned exit code {return_code}' subprocess.check_call(["vagrant", "up", f"--provider={provider_name}"], cwd=release_test_dir) return True def do_run_release_test(release_test_dir, platform, provider_name, test_config, test_device_serial): with open( os.path.join(release_test_dir, ".vagrant", "machines", "default", provider_name, "id") ) as f: machine_uuid = f.read() # Check if target is not QEMU if test_config["vid_hex"] and test_config["pid_hex"]: ATTACH_USB_DEVICE[provider_name]( machine_uuid, vid_hex=test_config["vid_hex"], pid_hex=test_config["pid_hex"], serial=test_device_serial, ) tvm_home = os.path.realpath(os.path.join(THIS_DIR, "..", "..", "..")) def _quote_cmd(cmd): return " ".join(shlex.quote(a) for a in cmd) test_cmd = ( _quote_cmd(["cd", tvm_home]) + " && " + _quote_cmd( [ f"apps/microtvm/reference-vm/{platform}/base-box/base_box_test.sh", test_config["microtvm_board"], ] ) ) subprocess.check_call(["vagrant", "ssh", "-c", f"bash -ec '{test_cmd}'"], cwd=release_test_dir) def test_command(args): user_box_dir = pathlib.Path(THIS_DIR) / args.platform base_box_dir = user_box_dir / "base-box" boards_file = pathlib.Path(THIS_DIR) / ".." 
/ args.platform / "template_project" / "boards.json" with open(boards_file) as f: test_config = json.load(f) # select microTVM test config microtvm_test_config = test_config[args.microtvm_board] for key, expected_type in REQUIRED_TEST_CONFIG_KEYS.items(): assert key in microtvm_test_config and isinstance( microtvm_test_config[key], expected_type ), f"Expected key {key} of type {expected_type} in {boards_file}: {test_config!r}" microtvm_test_config["vid_hex"] = microtvm_test_config["vid_hex"].lower() microtvm_test_config["pid_hex"] = microtvm_test_config["pid_hex"].lower() microtvm_test_config["microtvm_board"] = args.microtvm_board providers = args.provider release_test_dir = os.path.join(THIS_DIR, f"release-test-{args.platform}") if args.skip_build or args.skip_destroy: assert ( len(providers) == 1 ), "--skip-build and/or --skip-destroy was given, but >1 provider specified" test_failed = False for provider_name in providers: try: if not args.skip_build: do_build_release_test_vm( release_test_dir, user_box_dir, base_box_dir, provider_name ) do_run_release_test( release_test_dir, args.platform, provider_name, microtvm_test_config, args.test_device_serial, ) except subprocess.CalledProcessError: test_failed = True sys.exit( f"\n\nERROR: Provider '{provider_name}' failed the release test. " "You can re-run it to reproduce the issue without building everything " "again by passing the --skip-build and specifying only the provider that failed. " "The VM is still running in case you want to connect it via SSH to " "investigate further the issue, thus it's necessary to destroy it manually " "to release the resources back to the host, like a USB device attached to the VM." ) finally: # if we reached out here do_run_release_test() succeeded, hence we can # destroy the VM and release the resources back to the host if user haven't # requested to not destroy it. if not (args.skip_destroy or test_failed): subprocess.check_call(["vagrant", "destroy", "-f"], cwd=release_test_dir) shutil.rmtree(release_test_dir) print(f'\n\nThe release tests passed on all specified providers: {', '.join(providers)}.') def release_command(args): if args.release_full_name: vm_name = args.release_full_name else: vm_name = f"tlcpack/microtvm-{args.platform}-{args.platform_version}" if not args.skip_creating_release_version: subprocess.check_call( [ "vagrant", "cloud", "version", "create", vm_name, args.release_version, ] ) if not args.release_version: sys.exit(f"--release-version must be specified") for provider_name in args.provider: subprocess.check_call( [ "vagrant", "cloud", "publish", "-f", vm_name, args.release_version, provider_name, os.path.join( THIS_DIR, args.platform, "base-box", f"output-packer-{provider_name}/package.box", ), ] ) def parse_args(): parser = argparse.ArgumentParser( description="Automates building, testing, and releasing a base box" ) subparsers = parser.add_subparsers(help="Action to perform.") subparsers.required = True subparsers.dest = "action" parser.add_argument( "--provider", choices=ALL_PROVIDERS, action="append", required=True, help="Name of the provider or providers to act on", ) # "test" has special options for different platforms, and "build", "release" might # in the future, so we'll add the platform argument to each one individually. platform_help_str = "Platform to use (e.g. 
Arduino, Zephyr)" # Options for build subcommand parser_build = subparsers.add_parser("build", help="Build a base box.") parser_build.set_defaults(func=build_command) parser_build.add_argument("platform", help=platform_help_str, choices=ALL_PLATFORMS) parser_build.add_argument( "--debug-packer", action="store_true", help=("Run packer in debug mode, and write log to the base-box directory."), ) # Options for test subcommand parser_test = subparsers.add_parser("test", help="Test a base box before release.") parser_test.set_defaults(func=test_command) parser_test.add_argument( "--skip-build", action="store_true", help=( "If given, assume a box has already been built in the release-test subdirectory, " "so use that box to execute the release test script. If the tests fail the VM used " "for testing will be left running for further investigation and will need to be " "destroyed manually. If all tests pass on all specified providers no VM is left running, " "unless --skip-destroy is given too." ), ) parser_test.add_argument( "--skip-destroy", action="store_true", help=( "Skip destroying the test VM even if all tests pass. Can only be used if a single " "provider is specified. Default is to destroy the VM if all tests pass (and always " "skip destroying it if a test fails)." ), ) parser_test.add_argument( "--test-device-serial", help=( "If given, attach the test device with this USB serial number. Corresponds to the " "iSerial field from `lsusb -v` output." ), ) parser_test_platform_subparsers = parser_test.add_subparsers(help=platform_help_str) for platform in ALL_PLATFORMS: platform_specific_parser = parser_test_platform_subparsers.add_parser(platform) platform_specific_parser.set_defaults(platform=platform) platform_specific_parser.add_argument( "--microtvm-board", choices=ALL_MICROTVM_BOARDS[platform], required=True, help="MicroTVM board used for testing.", ) # Options for release subcommand parser_release = subparsers.add_parser("release", help="Release base box to cloud.") parser_release.set_defaults(func=release_command) parser_release.add_argument("platform", help=platform_help_str, choices=ALL_PLATFORMS) parser_release.add_argument( "--release-version", required=True, help="Version to release, in the form 'x.y.z'. Must be specified with release.", ) parser_release.add_argument( "--skip-creating-release-version", action="store_true", help="Skip creating the version and just upload for this provider.", ) parser_release.add_argument( "--platform-version", required=False, help=( "For Zephyr, the platform version to release, in the form 'x.y'. " "For Arduino, the version of arduino-cli that's being used, in the form 'x.y.z'." ), ) parser_release.add_argument( "--release-full-name", required=False, type=str, default=None, help=( "If set, it will use this as the full release name and version for the box. " "If this is set, it will ignore `--platform-version` and `--release-version`." ), ) args = parser.parse_args() if args.action == "release" and not args.release_full_name and not args.platform_version: parser.error("--platform-version is required.") return args def main(): args = parse_args() if os.path.sep in args.platform or not os.path.isdir(os.path.join(THIS_DIR, args.platform)): sys.exit(f"<platform> must be a sub-directory of {THIS_DIR}; got {args.platform}") args.func(args) if __name__ == "__main__": main()
#!/usr/bin/env python3 # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import argparse import copy import json import logging import os import re import shlex import shutil import subprocess import sys import pathlib _LOG = logging.getLogger(__name__) THIS_DIR = os.path.realpath(os.path.dirname(__file__) or ".") # List of vagrant providers supported by this tool ALL_PROVIDERS = ( "parallels", "virtualbox", "vmware_desktop", ) # List of supported electronics platforms. Each must correspond # to a sub-directory of this directory. ALL_PLATFORMS = ( "arduino", "zephyr", ) # Extra scripts required to execute on provisioning # in [platform]/base-box/base_box_provision.sh EXTRA_SCRIPTS = { "arduino": (), "zephyr": ( "docker/install/ubuntu_init_zephyr_project.sh", "docker/install/ubuntu_install_zephyr_sdk.sh", ), } PACKER_FILE_NAME = "packer.json" # List of identifying strings for microTVM boards for testing. with open(pathlib.Path(THIS_DIR) / ".." / "zephyr" / "template_project" / "boards.json") as f: zephyr_boards = json.load(f) with open(pathlib.Path(THIS_DIR) / ".." 
/ "arduino" / "template_project" / "boards.json") as f: arduino_boards = json.load(f) ALL_MICROTVM_BOARDS = { "arduino": arduino_boards.keys(), "zephyr": zephyr_boards.keys(), } def parse_virtualbox_devices(): output = subprocess.check_output(["VBoxManage", "list", "usbhost"], encoding="utf-8") devices = [] current_dev = {} for line in output.split("\n"): if not line.strip(): if current_dev: if "VendorId" in current_dev and "ProductId" in current_dev: devices.append(current_dev) current_dev = {} continue key, value = line.split(":", 1) value = value.lstrip(" ") current_dev[key] = value if current_dev: devices.append(current_dev) return devices VIRTUALBOX_USB_DEVICE_RE = ( "USBAttachVendorId[0-9]+=0x([0-9a-z]{4})\n" + "USBAttachProductId[0-9]+=0x([0-9a-z]{4})" ) def parse_virtualbox_attached_usb_devices(vm_uuid): output = subprocess.check_output( ["VBoxManage", "showvminfo", "--machinereadable", vm_uuid], encoding="utf-8" ) r = re.compile(VIRTUALBOX_USB_DEVICE_RE, re.MULTILINE) attached_usb_devices = r.findall(output) # List of couples (VendorId, ProductId) for all attached USB devices return attached_usb_devices VIRTUALBOX_VID_PID_RE = re.compile(r"0x([0-9A-Fa-f]{4}).*") def attach_virtualbox(vm_uuid, vid_hex=None, pid_hex=None, serial=None): usb_devices = parse_virtualbox_devices() for dev in usb_devices: m = VIRTUALBOX_VID_PID_RE.match(dev["VendorId"]) if not m: _LOG.warning("Malformed VendorId: %s", dev["VendorId"]) continue dev_vid_hex = m.group(1).lower() m = VIRTUALBOX_VID_PID_RE.match(dev["ProductId"]) if not m: _LOG.warning("Malformed ProductId: %s", dev["ProductId"]) continue dev_pid_hex = m.group(1).lower() if ( vid_hex == dev_vid_hex and pid_hex == dev_pid_hex and (serial is None or serial == dev["SerialNumber"]) ): attached_devices = parse_virtualbox_attached_usb_devices(vm_uuid) for vid, pid in attached_devices: if vid_hex == vid and pid_hex == pid: print(f"USB dev {vid_hex}:{pid_hex} already attached. 
Skipping attach.") return rule_args = [ "VBoxManage", "usbfilter", "add", "0", "--action", "hold", "--name", "test device", "--target", vm_uuid, "--vendorid", vid_hex, "--productid", pid_hex, ] if serial is not None: rule_args.extend(["--serialnumber", serial]) subprocess.check_call(rule_args) subprocess.check_call(["VBoxManage", "controlvm", vm_uuid, "usbattach", dev["UUID"]]) return raise Exception( f"Device with vid={vid_hex}, pid={pid_hex}, serial={serial!r} not found:\n{usb_devices!r}" ) def attach_parallels(uuid, vid_hex=None, pid_hex=None, serial=None): usb_devices = json.loads( subprocess.check_output(["prlsrvctl", "usb", "list", "-j"], encoding="utf-8") ) for dev in usb_devices: _, dev_vid_hex, dev_pid_hex, _, _, dev_serial = dev["System name"].split("|") dev_vid_hex = dev_vid_hex.lower() dev_pid_hex = dev_pid_hex.lower() if ( vid_hex == dev_vid_hex and pid_hex == dev_pid_hex and (serial is None or serial == dev_serial) ): subprocess.check_call(["prlsrvctl", "usb", "set", dev["Name"], uuid]) if "Used-By-Vm-Name" in dev: subprocess.check_call( ["prlctl", "set", dev["Used-By-Vm-Name"], "--device-disconnect", dev["Name"]] ) subprocess.check_call(["prlctl", "set", uuid, "--device-connect", dev["Name"]]) return raise Exception( f"Device with vid={vid_hex}, pid={pid_hex}, serial={serial!r} not found:\n{usb_devices!r}" ) def attach_vmware(uuid, vid_hex=None, pid_hex=None, serial=None): print("NOTE: vmware doesn't seem to support automatic attaching of devices :(") print(f"The VMWare VM UUID is {uuid}") print("Please attach the following usb device using the VMWare GUI:") if vid_hex is not None: print(f" - VID: {vid_hex}") if pid_hex is not None: print(f" - PID: {pid_hex}") if serial is not None: print(f" - Serial: {serial}") if vid_hex is None and pid_hex is None and serial is None: print(" - (no specifications given for USB device)") print() print("Press [Enter] when the USB device is attached") input() ATTACH_USB_DEVICE = { "parallels": attach_parallels, "virtualbox": attach_virtualbox, "vmware_desktop": attach_vmware, } def generate_packer_config(platform, file_path, providers): builders = [] provisioners = [] for provider_name in providers: builders.append( { "name": f"{provider_name}", "type": "vagrant", "box_name": f"microtvm-base-{provider_name}", "output_dir": f"output-packer-{provider_name}", "communicator": "ssh", "source_path": "generic/ubuntu1804", "provider": provider_name, "template": "Vagrantfile.packer-template", } ) repo_root = subprocess.check_output( ["git", "rev-parse", "--show-toplevel"], encoding="utf-8" ).strip() for script in EXTRA_SCRIPTS[platform]: script_path = os.path.join(repo_root, script) filename = os.path.basename(script_path) provisioners.append({"type": "file", "source": script_path, "destination": f"~/{filename}"}) provisioners.append( { "type": "shell", "script": "base_box_setup.sh", } ) provisioners.append( { "type": "shell", "script": "base_box_provision.sh", } ) with open(file_path, "w") as f: json.dump( { "builders": builders, "provisioners": provisioners, }, f, sort_keys=True, indent=2, ) def build_command(args): generate_packer_config( args.platform, os.path.join(THIS_DIR, args.platform, "base-box", PACKER_FILE_NAME), args.provider or ALL_PROVIDERS, ) env = copy.copy(os.environ) packer_args = ["packer", "build"] env["PACKER_LOG"] = "1" env["PACKER_LOG_PATH"] = "packer.log" if args.debug_packer: packer_args += ["-debug"] packer_args += [PACKER_FILE_NAME] subprocess.check_call( packer_args, cwd=os.path.join(THIS_DIR, args.platform, "base-box"), 
env=env ) REQUIRED_TEST_CONFIG_KEYS = { "vid_hex": str, "pid_hex": str, } VM_BOX_RE = re.compile(r'(.*\.vm\.box) = "(.*)"') # Paths, relative to the platform box directory, which will not be copied to release-test dir. SKIP_COPY_PATHS = [".vagrant", "base-box"] def do_build_release_test_vm( release_test_dir, user_box_dir: pathlib.Path, base_box_dir: pathlib.Path, provider_name ): if os.path.exists(release_test_dir): try: subprocess.check_call(["vagrant", "destroy", "-f"], cwd=release_test_dir) except subprocess.CalledProcessError: _LOG.warning("vagrant destroy failed--removing dirtree anyhow", exc_info=True) shutil.rmtree(release_test_dir) for dirpath, _, filenames in os.walk(user_box_dir): rel_path = os.path.relpath(dirpath, user_box_dir) if any( rel_path == scp or rel_path.startswith(f"{scp}{os.path.sep}") for scp in SKIP_COPY_PATHS ): continue dest_dir = os.path.join(release_test_dir, rel_path) os.makedirs(dest_dir) for filename in filenames: shutil.copy2(os.path.join(dirpath, filename), os.path.join(dest_dir, filename)) release_test_vagrantfile = os.path.join(release_test_dir, "Vagrantfile") with open(release_test_vagrantfile) as f: lines = list(f) found_box_line = False with open(release_test_vagrantfile, "w") as f: for line in lines: m = VM_BOX_RE.match(line) if not m: f.write(line) continue box_package = os.path.join( base_box_dir, f"output-packer-{provider_name}", "package.box" ) box_relpath = os.path.relpath(box_package, release_test_dir) f.write(f'{m.group(1)} = "{box_relpath}"\n') found_box_line = True if not found_box_line: _LOG.error( "testing provider %s: couldn't find config.box.vm = line in Vagrantfile; unable to test", provider_name, ) return False # Delete the old box registered with Vagrant, which may lead to a falsely-passing release test. remove_args = ["vagrant", "box", "remove", box_relpath] return_code = subprocess.call(remove_args, cwd=release_test_dir) assert return_code in (0, 1), f'{" ".join(remove_args)} returned exit code {return_code}' subprocess.check_call(["vagrant", "up", f"--provider={provider_name}"], cwd=release_test_dir) return True def do_run_release_test(release_test_dir, platform, provider_name, test_config, test_device_serial): with open( os.path.join(release_test_dir, ".vagrant", "machines", "default", provider_name, "id") ) as f: machine_uuid = f.read() # Check if target is not QEMU if test_config["vid_hex"] and test_config["pid_hex"]: ATTACH_USB_DEVICE[provider_name]( machine_uuid, vid_hex=test_config["vid_hex"], pid_hex=test_config["pid_hex"], serial=test_device_serial, ) tvm_home = os.path.realpath(os.path.join(THIS_DIR, "..", "..", "..")) def _quote_cmd(cmd): return " ".join(shlex.quote(a) for a in cmd) test_cmd = ( _quote_cmd(["cd", tvm_home]) + " && " + _quote_cmd( [ f"apps/microtvm/reference-vm/{platform}/base-box/base_box_test.sh", test_config["microtvm_board"], ] ) ) subprocess.check_call(["vagrant", "ssh", "-c", f"bash -ec '{test_cmd}'"], cwd=release_test_dir) def test_command(args): user_box_dir = pathlib.Path(THIS_DIR) / args.platform base_box_dir = user_box_dir / "base-box" boards_file = pathlib.Path(THIS_DIR) / ".." 
/ args.platform / "template_project" / "boards.json" with open(boards_file) as f: test_config = json.load(f) # select microTVM test config microtvm_test_config = test_config[args.microtvm_board] for key, expected_type in REQUIRED_TEST_CONFIG_KEYS.items(): assert key in microtvm_test_config and isinstance( microtvm_test_config[key], expected_type ), f"Expected key {key} of type {expected_type} in {boards_file}: {test_config!r}" microtvm_test_config["vid_hex"] = microtvm_test_config["vid_hex"].lower() microtvm_test_config["pid_hex"] = microtvm_test_config["pid_hex"].lower() microtvm_test_config["microtvm_board"] = args.microtvm_board providers = args.provider release_test_dir = os.path.join(THIS_DIR, f"release-test-{args.platform}") if args.skip_build or args.skip_destroy: assert ( len(providers) == 1 ), "--skip-build and/or --skip-destroy was given, but >1 provider specified" test_failed = False for provider_name in providers: try: if not args.skip_build: do_build_release_test_vm( release_test_dir, user_box_dir, base_box_dir, provider_name ) do_run_release_test( release_test_dir, args.platform, provider_name, microtvm_test_config, args.test_device_serial, ) except subprocess.CalledProcessError: test_failed = True sys.exit( f"\n\nERROR: Provider '{provider_name}' failed the release test. " "You can re-run it to reproduce the issue without building everything " "again by passing the --skip-build and specifying only the provider that failed. " "The VM is still running in case you want to connect it via SSH to " "investigate further the issue, thus it's necessary to destroy it manually " "to release the resources back to the host, like a USB device attached to the VM." ) finally: # if we reached out here do_run_release_test() succeeded, hence we can # destroy the VM and release the resources back to the host if user haven't # requested to not destroy it. if not (args.skip_destroy or test_failed): subprocess.check_call(["vagrant", "destroy", "-f"], cwd=release_test_dir) shutil.rmtree(release_test_dir) print(f'\n\nThe release tests passed on all specified providers: {", ".join(providers)}.') def release_command(args): if args.release_full_name: vm_name = args.release_full_name else: vm_name = f"tlcpack/microtvm-{args.platform}-{args.platform_version}" if not args.skip_creating_release_version: subprocess.check_call( [ "vagrant", "cloud", "version", "create", vm_name, args.release_version, ] ) if not args.release_version: sys.exit(f"--release-version must be specified") for provider_name in args.provider: subprocess.check_call( [ "vagrant", "cloud", "publish", "-f", vm_name, args.release_version, provider_name, os.path.join( THIS_DIR, args.platform, "base-box", f"output-packer-{provider_name}/package.box", ), ] ) def parse_args(): parser = argparse.ArgumentParser( description="Automates building, testing, and releasing a base box" ) subparsers = parser.add_subparsers(help="Action to perform.") subparsers.required = True subparsers.dest = "action" parser.add_argument( "--provider", choices=ALL_PROVIDERS, action="append", required=True, help="Name of the provider or providers to act on", ) # "test" has special options for different platforms, and "build", "release" might # in the future, so we'll add the platform argument to each one individually. platform_help_str = "Platform to use (e.g. 
Arduino, Zephyr)" # Options for build subcommand parser_build = subparsers.add_parser("build", help="Build a base box.") parser_build.set_defaults(func=build_command) parser_build.add_argument("platform", help=platform_help_str, choices=ALL_PLATFORMS) parser_build.add_argument( "--debug-packer", action="store_true", help=("Run packer in debug mode, and write log to the base-box directory."), ) # Options for test subcommand parser_test = subparsers.add_parser("test", help="Test a base box before release.") parser_test.set_defaults(func=test_command) parser_test.add_argument( "--skip-build", action="store_true", help=( "If given, assume a box has already been built in the release-test subdirectory, " "so use that box to execute the release test script. If the tests fail the VM used " "for testing will be left running for further investigation and will need to be " "destroyed manually. If all tests pass on all specified providers no VM is left running, " "unless --skip-destroy is given too." ), ) parser_test.add_argument( "--skip-destroy", action="store_true", help=( "Skip destroying the test VM even if all tests pass. Can only be used if a single " "provider is specified. Default is to destroy the VM if all tests pass (and always " "skip destroying it if a test fails)." ), ) parser_test.add_argument( "--test-device-serial", help=( "If given, attach the test device with this USB serial number. Corresponds to the " "iSerial field from `lsusb -v` output." ), ) parser_test_platform_subparsers = parser_test.add_subparsers(help=platform_help_str) for platform in ALL_PLATFORMS: platform_specific_parser = parser_test_platform_subparsers.add_parser(platform) platform_specific_parser.set_defaults(platform=platform) platform_specific_parser.add_argument( "--microtvm-board", choices=ALL_MICROTVM_BOARDS[platform], required=True, help="MicroTVM board used for testing.", ) # Options for release subcommand parser_release = subparsers.add_parser("release", help="Release base box to cloud.") parser_release.set_defaults(func=release_command) parser_release.add_argument("platform", help=platform_help_str, choices=ALL_PLATFORMS) parser_release.add_argument( "--release-version", required=True, help="Version to release, in the form 'x.y.z'. Must be specified with release.", ) parser_release.add_argument( "--skip-creating-release-version", action="store_true", help="Skip creating the version and just upload for this provider.", ) parser_release.add_argument( "--platform-version", required=False, help=( "For Zephyr, the platform version to release, in the form 'x.y'. " "For Arduino, the version of arduino-cli that's being used, in the form 'x.y.z'." ), ) parser_release.add_argument( "--release-full-name", required=False, type=str, default=None, help=( "If set, it will use this as the full release name and version for the box. " "If this is set, it will ignore `--platform-version` and `--release-version`." ), ) args = parser.parse_args() if args.action == "release" and not args.release_full_name and not args.platform_version: parser.error("--platform-version is required.") return args def main(): args = parse_args() if os.path.sep in args.platform or not os.path.isdir(os.path.join(THIS_DIR, args.platform)): sys.exit(f"<platform> must be a sub-directory of {THIS_DIR}; got {args.platform}") args.func(args) if __name__ == "__main__": main()
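Illustrative invocations of the tool above (assumptions: the script is saved as base_box_tool.py in this directory, packer and vagrant plus the chosen provider are installed, and qemu_x86 is a key present in the zephyr boards.json):

#   ./base_box_tool.py --provider virtualbox build zephyr
#   ./base_box_tool.py --provider virtualbox test zephyr --microtvm-board qemu_x86
#   ./base_box_tool.py --provider virtualbox release zephyr --release-version 1.0.0 --platform-version 0.5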
import os import shutil from pathlib import Path from typing import Any, Dict, List, Optional, Tuple import yaml from plotter import __version__ from plotter.consensus.coinbase import create_puzzlehash_for_pk from plotter.ssl.create_ssl import generate_ca_signed_cert, get_plotter_ca_crt_key, make_ca_cert from plotter.util.bech32m import encode_puzzle_hash from plotter.util.config import ( create_default_plotter_config, initial_config_file, load_config, save_config, unflatten_properties, ) from plotter.util.ints import uint32 from plotter.util.keychain import Keychain from plotter.util.path import mkdir from plotter.wallet.derive_keys import master_sk_to_pool_sk, master_sk_to_wallet_sk private_node_names = {"full_node", "wallet", "farmer", "harvester", "timelord", "daemon"} public_node_names = {"full_node", "wallet", "farmer", "introducer", "timelord"} def dict_add_new_default(updated: Dict, default: Dict, do_not_migrate_keys: Dict[str, Any]): for k in do_not_migrate_keys: if k in updated and do_not_migrate_keys[k] == "": updated.pop(k) for k, v in default.items(): ignore = False if k in do_not_migrate_keys: do_not_data = do_not_migrate_keys[k] if isinstance(do_not_data, dict): ignore = False else: ignore = True if isinstance(v, dict) and k in updated and ignore is False: # If there is an intermediate key with empty string value, do not migrate all descendants if do_not_migrate_keys.get(k, None) == "": do_not_migrate_keys[k] = v dict_add_new_default(updated[k], default[k], do_not_migrate_keys.get(k, {})) elif k not in updated or ignore is True: updated[k] = v def check_keys(new_root: Path) -> None: keychain: Keychain = Keychain() all_sks = keychain.get_all_private_keys() if len(all_sks) == 0: print("No keys are present in the keychain. Generate them with 'plotter keys generate'") return None config: Dict = load_config(new_root, "config.yaml") pool_child_pubkeys = [master_sk_to_pool_sk(sk).get_g1() for sk, _ in all_sks] all_targets = [] stop_searching_for_farmer = "pltr_target_address" not in config["farmer"] stop_searching_for_pool = "pltr_target_address" not in config["pool"] number_of_ph_to_search = 500 selected = config["selected_network"] prefix = config["network_overrides"]["config"][selected]["address_prefix"] for i in range(number_of_ph_to_search): if stop_searching_for_farmer and stop_searching_for_pool and i > 0: break for sk, _ in all_sks: all_targets.append( encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()), prefix) ) if all_targets[-1] == config["farmer"].get("pltr_target_address"): stop_searching_for_farmer = True if all_targets[-1] == config["pool"].get("pltr_target_address"): stop_searching_for_pool = True # Set the destinations if "pltr_target_address" not in config["farmer"]: print(f"Setting the pltr destination address for coinbase fees reward to {all_targets[0]}") config["farmer"]["pltr_target_address"] = all_targets[0] elif config["farmer"]["pltr_target_address"] not in all_targets: print( f"WARNING: using a farmer address which we don't have the private" f" keys for. We searched the first {number_of_ph_to_search} addresses. 
Consider overriding " f"{config["farmer"]["pltr_target_address"]} with {all_targets[0]}" ) if "pool" not in config: config["pool"] = {} if "pltr_target_address" not in config["pool"]: print(f"Setting the pltr destination address for coinbase reward to {all_targets[0]}") config["pool"]["pltr_target_address"] = all_targets[0] elif config["pool"]["pltr_target_address"] not in all_targets: print( f"WARNING: using a pool address which we don't have the private" f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding " f"{config["pool"]["pltr_target_address"]} with {all_targets[0]}" ) # Set the pool pks in the farmer pool_pubkeys_hex = set(bytes(pk).hex() for pk in pool_child_pubkeys) if "pool_public_keys" in config["farmer"]: for pk_hex in config["farmer"]["pool_public_keys"]: # Add original ones in config pool_pubkeys_hex.add(pk_hex) config["farmer"]["pool_public_keys"] = pool_pubkeys_hex save_config(new_root, "config.yaml", config) def copy_files_rec(old_path: Path, new_path: Path): if old_path.is_file(): print(f"{new_path}") mkdir(new_path.parent) shutil.copy(old_path, new_path) elif old_path.is_dir(): for old_path_child in old_path.iterdir(): new_path_child = new_path / old_path_child.name copy_files_rec(old_path_child, new_path_child) def migrate_from( old_root: Path, new_root: Path, manifest: List[str], do_not_migrate_settings: List[str], ): """ Copy all the files in "manifest" to the new config directory. """ if old_root == new_root: print("same as new path, exiting") return 1 if not old_root.is_dir(): print(f"{old_root} not found - this is ok if you did not install this version") return 0 print(f"\n{old_root} found") print(f"Copying files from {old_root} to {new_root}\n") for f in manifest: old_path = old_root / f new_path = new_root / f copy_files_rec(old_path, new_path) # update config yaml with new keys config: Dict = load_config(new_root, "config.yaml") config_str: str = initial_config_file("config.yaml") default_config: Dict = yaml.safe_load(config_str) flattened_keys = unflatten_properties({k: "" for k in do_not_migrate_settings}) dict_add_new_default(config, default_config, flattened_keys) save_config(new_root, "config.yaml", config) create_all_ssl(new_root) return 1 def create_all_ssl(root: Path): # remove old key and crt config_dir = root / "config" old_key_path = config_dir / "trusted.key" old_crt_path = config_dir / "trusted.crt" if old_key_path.exists(): print(f"Old key not needed anymore, deleting {old_key_path}") os.remove(old_key_path) if old_crt_path.exists(): print(f"Old crt not needed anymore, deleting {old_crt_path}") os.remove(old_crt_path) ssl_dir = config_dir / "ssl" if not ssl_dir.exists(): ssl_dir.mkdir() ca_dir = ssl_dir / "ca" if not ca_dir.exists(): ca_dir.mkdir() private_ca_key_path = ca_dir / "private_ca.key" private_ca_crt_path = ca_dir / "private_ca.crt" plotter_ca_crt, plotter_ca_key = get_plotter_ca_crt_key() plotter_ca_crt_path = ca_dir / "plotter_ca.crt" plotter_ca_key_path = ca_dir / "plotter_ca.key" plotter_ca_crt_path.write_bytes(plotter_ca_crt) plotter_ca_key_path.write_bytes(plotter_ca_key) if not private_ca_key_path.exists() or not private_ca_crt_path.exists(): # Create private CA print(f"Can't find private CA, creating a new one in {root} to generate TLS certificates") make_ca_cert(private_ca_crt_path, private_ca_key_path) # Create private certs for each node ca_key = private_ca_key_path.read_bytes() ca_crt = private_ca_crt_path.read_bytes() generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True) else: # This is 
entered when user copied over private CA print(f"Found private CA in {root}, using it to generate TLS certificates") ca_key = private_ca_key_path.read_bytes() ca_crt = private_ca_crt_path.read_bytes() generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True) plotter_ca_crt, plotter_ca_key = get_plotter_ca_crt_key() generate_ssl_for_nodes(ssl_dir, plotter_ca_crt, plotter_ca_key, False, overwrite=False) def generate_ssl_for_nodes(ssl_dir: Path, ca_crt: bytes, ca_key: bytes, private: bool, overwrite=True): if private: names = private_node_names else: names = public_node_names for node_name in names: node_dir = ssl_dir / node_name if not node_dir.exists(): node_dir.mkdir() if private: prefix = "private" else: prefix = "public" key_path = node_dir / f"{prefix}_{node_name}.key" crt_path = node_dir / f"{prefix}_{node_name}.crt" if key_path.exists() and crt_path.exists() and overwrite is False: continue generate_ca_signed_cert(ca_crt, ca_key, crt_path, key_path) def copy_cert_files(cert_path: Path, new_path: Path): for ext in "*.crt", "*.key": for old_path_child in cert_path.glob(ext): new_path_child = new_path / old_path_child.name copy_files_rec(old_path_child, new_path_child) def init(create_certs: Optional[Path], root_path: Path): if create_certs is not None: if root_path.exists(): if os.path.isdir(create_certs): ca_dir: Path = root_path / "config/ssl/ca" if ca_dir.exists(): print(f"Deleting your OLD CA in {ca_dir}") shutil.rmtree(ca_dir) print(f"Copying your CA from {create_certs} to {ca_dir}") copy_cert_files(create_certs, ca_dir) create_all_ssl(root_path) else: print(f"** Directory {create_certs} does not exist **") else: print(f"** {root_path} does not exist. Executing core init **") # sanity check here to prevent infinite recursion if plotter_init(root_path) == 0 and root_path.exists(): return init(create_certs, root_path) print(f"** {root_path} was not created. Exiting **") return -1 else: return plotter_init(root_path) def plotter_version_number() -> Tuple[str, str, str, str]: scm_full_version = __version__ left_full_version = scm_full_version.split("+") version = left_full_version[0].split(".") scm_major_version = version[0] scm_minor_version = version[1] if len(version) > 2: smc_patch_version = version[2] patch_release_number = smc_patch_version else: smc_patch_version = "" major_release_number = scm_major_version minor_release_number = scm_minor_version dev_release_number = "" # If this is a beta dev release - get which beta it is if "0b" in scm_minor_version: original_minor_ver_list = scm_minor_version.split("0b") major_release_number = str(1 - int(scm_major_version)) # decrement the major release for beta minor_release_number = scm_major_version patch_release_number = original_minor_ver_list[1] if smc_patch_version and "dev" in smc_patch_version: dev_release_number = "." + smc_patch_version elif "0rc" in version[1]: original_minor_ver_list = scm_minor_version.split("0rc") major_release_number = str(1 - int(scm_major_version)) # decrement the major release for release candidate minor_release_number = str(int(scm_major_version) + 1) # RC is 0.2.1 for RC 1 patch_release_number = original_minor_ver_list[1] if smc_patch_version and "dev" in smc_patch_version: dev_release_number = "." + smc_patch_version else: major_release_number = scm_major_version minor_release_number = scm_minor_version patch_release_number = smc_patch_version dev_release_number = "" install_release_number = major_release_number + "." + minor_release_number if len(patch_release_number) > 0: install_release_number += "." 
+ patch_release_number if len(dev_release_number) > 0: install_release_number += dev_release_number return major_release_number, minor_release_number, patch_release_number, dev_release_number def plotter_minor_release_number(): res = int(plotter_version_number()[2]) print(f"Install release number: {res}") return res def plotter_full_version_str() -> str: major, minor, patch, dev = plotter_version_number() return f"{major}.{minor}.{patch}{dev}" def plotter_init(root_path: Path): if os.environ.get("PLOTTER_ROOT", None) is not None: print( f"warning, your PLOTTER_ROOT is set to {os.environ["PLOTTER_ROOT"]}. " f"Please unset the environment variable and run plotter init again\n" f"or manually migrate config.yaml" ) print(f"Plotter directory {root_path}") if root_path.is_dir() and Path(root_path / "config" / "config.yaml").exists(): # This is reached if PLOTTER_ROOT is set, or if user has run plotter init twice # before a new update. check_keys(root_path) print(f"{root_path} already exists, no migration action taken") return -1 create_default_plotter_config(root_path) create_all_ssl(root_path) check_keys(root_path) print("") print("To see your keys, run 'plotter keys show --show-mnemonic-seed'") return 0
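A standalone sketch of how dict_add_new_default above merges new defaults while honouring do_not_migrate_keys (the keys and values are made up for illustration):

updated = {"farmer": {"port": 1234}, "old_key": 1}
default = {"farmer": {"port": 8447, "new_opt": True}, "old_key": 2}
# An empty-string marker drops the stale user value so the default wins;
# nested user settings (farmer.port) survive and missing defaults are added.
dict_add_new_default(updated, default, {"old_key": ""})
assert updated == {"farmer": {"port": 1234, "new_opt": True}, "old_key": 2}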
import os import shutil from pathlib import Path from typing import Any, Dict, List, Optional, Tuple import yaml from plotter import __version__ from plotter.consensus.coinbase import create_puzzlehash_for_pk from plotter.ssl.create_ssl import generate_ca_signed_cert, get_plotter_ca_crt_key, make_ca_cert from plotter.util.bech32m import encode_puzzle_hash from plotter.util.config import ( create_default_plotter_config, initial_config_file, load_config, save_config, unflatten_properties, ) from plotter.util.ints import uint32 from plotter.util.keychain import Keychain from plotter.util.path import mkdir from plotter.wallet.derive_keys import master_sk_to_pool_sk, master_sk_to_wallet_sk private_node_names = {"full_node", "wallet", "farmer", "harvester", "timelord", "daemon"} public_node_names = {"full_node", "wallet", "farmer", "introducer", "timelord"} def dict_add_new_default(updated: Dict, default: Dict, do_not_migrate_keys: Dict[str, Any]): for k in do_not_migrate_keys: if k in updated and do_not_migrate_keys[k] == "": updated.pop(k) for k, v in default.items(): ignore = False if k in do_not_migrate_keys: do_not_data = do_not_migrate_keys[k] if isinstance(do_not_data, dict): ignore = False else: ignore = True if isinstance(v, dict) and k in updated and ignore is False: # If there is an intermediate key with empty string value, do not migrate all descendants if do_not_migrate_keys.get(k, None) == "": do_not_migrate_keys[k] = v dict_add_new_default(updated[k], default[k], do_not_migrate_keys.get(k, {})) elif k not in updated or ignore is True: updated[k] = v def check_keys(new_root: Path) -> None: keychain: Keychain = Keychain() all_sks = keychain.get_all_private_keys() if len(all_sks) == 0: print("No keys are present in the keychain. Generate them with 'plotter keys generate'") return None config: Dict = load_config(new_root, "config.yaml") pool_child_pubkeys = [master_sk_to_pool_sk(sk).get_g1() for sk, _ in all_sks] all_targets = [] stop_searching_for_farmer = "pltr_target_address" not in config["farmer"] stop_searching_for_pool = "pltr_target_address" not in config["pool"] number_of_ph_to_search = 500 selected = config["selected_network"] prefix = config["network_overrides"]["config"][selected]["address_prefix"] for i in range(number_of_ph_to_search): if stop_searching_for_farmer and stop_searching_for_pool and i > 0: break for sk, _ in all_sks: all_targets.append( encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()), prefix) ) if all_targets[-1] == config["farmer"].get("pltr_target_address"): stop_searching_for_farmer = True if all_targets[-1] == config["pool"].get("pltr_target_address"): stop_searching_for_pool = True # Set the destinations if "pltr_target_address" not in config["farmer"]: print(f"Setting the pltr destination address for coinbase fees reward to {all_targets[0]}") config["farmer"]["pltr_target_address"] = all_targets[0] elif config["farmer"]["pltr_target_address"] not in all_targets: print( f"WARNING: using a farmer address which we don't have the private" f" keys for. We searched the first {number_of_ph_to_search} addresses. 
Consider overriding " f"{config['farmer']['pltr_target_address']} with {all_targets[0]}" ) if "pool" not in config: config["pool"] = {} if "pltr_target_address" not in config["pool"]: print(f"Setting the pltr destination address for coinbase reward to {all_targets[0]}") config["pool"]["pltr_target_address"] = all_targets[0] elif config["pool"]["pltr_target_address"] not in all_targets: print( f"WARNING: using a pool address which we don't have the private" f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding " f"{config['pool']['pltr_target_address']} with {all_targets[0]}" ) # Set the pool pks in the farmer pool_pubkeys_hex = set(bytes(pk).hex() for pk in pool_child_pubkeys) if "pool_public_keys" in config["farmer"]: for pk_hex in config["farmer"]["pool_public_keys"]: # Add original ones in config pool_pubkeys_hex.add(pk_hex) config["farmer"]["pool_public_keys"] = pool_pubkeys_hex save_config(new_root, "config.yaml", config) def copy_files_rec(old_path: Path, new_path: Path): if old_path.is_file(): print(f"{new_path}") mkdir(new_path.parent) shutil.copy(old_path, new_path) elif old_path.is_dir(): for old_path_child in old_path.iterdir(): new_path_child = new_path / old_path_child.name copy_files_rec(old_path_child, new_path_child) def migrate_from( old_root: Path, new_root: Path, manifest: List[str], do_not_migrate_settings: List[str], ): """ Copy all the files in "manifest" to the new config directory. """ if old_root == new_root: print("same as new path, exiting") return 1 if not old_root.is_dir(): print(f"{old_root} not found - this is ok if you did not install this version") return 0 print(f"\n{old_root} found") print(f"Copying files from {old_root} to {new_root}\n") for f in manifest: old_path = old_root / f new_path = new_root / f copy_files_rec(old_path, new_path) # update config yaml with new keys config: Dict = load_config(new_root, "config.yaml") config_str: str = initial_config_file("config.yaml") default_config: Dict = yaml.safe_load(config_str) flattened_keys = unflatten_properties({k: "" for k in do_not_migrate_settings}) dict_add_new_default(config, default_config, flattened_keys) save_config(new_root, "config.yaml", config) create_all_ssl(new_root) return 1 def create_all_ssl(root: Path): # remove old key and crt config_dir = root / "config" old_key_path = config_dir / "trusted.key" old_crt_path = config_dir / "trusted.crt" if old_key_path.exists(): print(f"Old key not needed anymore, deleting {old_key_path}") os.remove(old_key_path) if old_crt_path.exists(): print(f"Old crt not needed anymore, deleting {old_crt_path}") os.remove(old_crt_path) ssl_dir = config_dir / "ssl" if not ssl_dir.exists(): ssl_dir.mkdir() ca_dir = ssl_dir / "ca" if not ca_dir.exists(): ca_dir.mkdir() private_ca_key_path = ca_dir / "private_ca.key" private_ca_crt_path = ca_dir / "private_ca.crt" plotter_ca_crt, plotter_ca_key = get_plotter_ca_crt_key() plotter_ca_crt_path = ca_dir / "plotter_ca.crt" plotter_ca_key_path = ca_dir / "plotter_ca.key" plotter_ca_crt_path.write_bytes(plotter_ca_crt) plotter_ca_key_path.write_bytes(plotter_ca_key) if not private_ca_key_path.exists() or not private_ca_crt_path.exists(): # Create private CA print(f"Can't find private CA, creating a new one in {root} to generate TLS certificates") make_ca_cert(private_ca_crt_path, private_ca_key_path) # Create private certs for each node ca_key = private_ca_key_path.read_bytes() ca_crt = private_ca_crt_path.read_bytes() generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True) else: # This is 
entered when user copied over private CA print(f"Found private CA in {root}, using it to generate TLS certificates") ca_key = private_ca_key_path.read_bytes() ca_crt = private_ca_crt_path.read_bytes() generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True) plotter_ca_crt, plotter_ca_key = get_plotter_ca_crt_key() generate_ssl_for_nodes(ssl_dir, plotter_ca_crt, plotter_ca_key, False, overwrite=False) def generate_ssl_for_nodes(ssl_dir: Path, ca_crt: bytes, ca_key: bytes, private: bool, overwrite=True): if private: names = private_node_names else: names = public_node_names for node_name in names: node_dir = ssl_dir / node_name if not node_dir.exists(): node_dir.mkdir() if private: prefix = "private" else: prefix = "public" key_path = node_dir / f"{prefix}_{node_name}.key" crt_path = node_dir / f"{prefix}_{node_name}.crt" if key_path.exists() and crt_path.exists() and overwrite is False: continue generate_ca_signed_cert(ca_crt, ca_key, crt_path, key_path) def copy_cert_files(cert_path: Path, new_path: Path): for ext in "*.crt", "*.key": for old_path_child in cert_path.glob(ext): new_path_child = new_path / old_path_child.name copy_files_rec(old_path_child, new_path_child) def init(create_certs: Optional[Path], root_path: Path): if create_certs is not None: if root_path.exists(): if os.path.isdir(create_certs): ca_dir: Path = root_path / "config/ssl/ca" if ca_dir.exists(): print(f"Deleting your OLD CA in {ca_dir}") shutil.rmtree(ca_dir) print(f"Copying your CA from {create_certs} to {ca_dir}") copy_cert_files(create_certs, ca_dir) create_all_ssl(root_path) else: print(f"** Directory {create_certs} does not exist **") else: print(f"** {root_path} does not exist. Executing core init **") # sanity check here to prevent infinite recursion if plotter_init(root_path) == 0 and root_path.exists(): return init(create_certs, root_path) print(f"** {root_path} was not created. Exiting **") return -1 else: return plotter_init(root_path) def plotter_version_number() -> Tuple[str, str, str, str]: scm_full_version = __version__ left_full_version = scm_full_version.split("+") version = left_full_version[0].split(".") scm_major_version = version[0] scm_minor_version = version[1] if len(version) > 2: smc_patch_version = version[2] patch_release_number = smc_patch_version else: smc_patch_version = "" major_release_number = scm_major_version minor_release_number = scm_minor_version dev_release_number = "" # If this is a beta dev release - get which beta it is if "0b" in scm_minor_version: original_minor_ver_list = scm_minor_version.split("0b") major_release_number = str(1 - int(scm_major_version)) # decrement the major release for beta minor_release_number = scm_major_version patch_release_number = original_minor_ver_list[1] if smc_patch_version and "dev" in smc_patch_version: dev_release_number = "." + smc_patch_version elif "0rc" in version[1]: original_minor_ver_list = scm_minor_version.split("0rc") major_release_number = str(1 - int(scm_major_version)) # decrement the major release for release candidate minor_release_number = str(int(scm_major_version) + 1) # RC is 0.2.1 for RC 1 patch_release_number = original_minor_ver_list[1] if smc_patch_version and "dev" in smc_patch_version: dev_release_number = "." + smc_patch_version else: major_release_number = scm_major_version minor_release_number = scm_minor_version patch_release_number = smc_patch_version dev_release_number = "" install_release_number = major_release_number + "." + minor_release_number if len(patch_release_number) > 0: install_release_number += "." 
+ patch_release_number if len(dev_release_number) > 0: install_release_number += dev_release_number return major_release_number, minor_release_number, patch_release_number, dev_release_number def plotter_minor_release_number(): res = int(plotter_version_number()[2]) print(f"Install release number: {res}") return res def plotter_full_version_str() -> str: major, minor, patch, dev = plotter_version_number() return f"{major}.{minor}.{patch}{dev}" def plotter_init(root_path: Path): if os.environ.get("PLOTTER_ROOT", None) is not None: print( f"warning, your PLOTTER_ROOT is set to {os.environ['PLOTTER_ROOT']}. " f"Please unset the environment variable and run plotter init again\n" f"or manually migrate config.yaml" ) print(f"Plotter directory {root_path}") if root_path.is_dir() and Path(root_path / "config" / "config.yaml").exists(): # This is reached if PLOTTER_ROOT is set, or if user has run plotter init twice # before a new update. check_keys(root_path) print(f"{root_path} already exists, no migration action taken") return -1 create_default_plotter_config(root_path) create_all_ssl(root_path) check_keys(root_path) print("") print("To see your keys, run 'plotter keys show --show-mnemonic-seed'") return 0
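The version-string handling in plotter_version_number above is easiest to check with a concrete input. Below is a minimal standalone sketch of the beta branch, using the made-up scm version "1.0b5.dev123" (not a value taken from this codebase): a beta tag "1.0b5" is reported as install release "0.1.5", with the dev suffix appended when present.

# Sketch of the "0b" (beta) branch of plotter_version_number(), hypothetical input.
scm_full_version = "1.0b5.dev123"
version = scm_full_version.split("+")[0].split(".")
scm_major_version, scm_minor_version = version[0], version[1]  # "1", "0b5"
scm_patch_version = version[2] if len(version) > 2 else ""     # "dev123"
if "0b" in scm_minor_version:
    major = str(1 - int(scm_major_version))   # "0": major is decremented for beta
    minor = scm_major_version                 # "1"
    patch = scm_minor_version.split("0b")[1]  # "5"
    dev = "." + scm_patch_version if "dev" in scm_patch_version else ""
print(f"{major}.{minor}.{patch}{dev}")  # -> 0.1.5.dev123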
"""Select platform for Advantage Air integration.""" from homeassistant.components.select import SelectEntity from .const import DOMAIN as ADVANTAGE_AIR_DOMAIN from .entity import AdvantageAirEntity ADVANTAGE_AIR_INACTIVE = "Inactive" async def async_setup_entry(hass, config_entry, async_add_entities): """Set up AdvantageAir toggle platform.""" instance = hass.data[ADVANTAGE_AIR_DOMAIN][config_entry.entry_id] entities = [] for ac_key in instance["coordinator"].data["aircons"]: entities.append(AdvantageAirMyZone(instance, ac_key)) async_add_entities(entities) class AdvantageAirMyZone(AdvantageAirEntity, SelectEntity): """Representation of Advantage Air MyZone control.""" _attr_icon = "mdi:home-thermometer" _attr_options = [ADVANTAGE_AIR_INACTIVE] _number_to_name = {0: ADVANTAGE_AIR_INACTIVE} _name_to_number = {ADVANTAGE_AIR_INACTIVE: 0} def __init__(self, instance, ac_key): """Initialize an Advantage Air MyZone control.""" super().__init__(instance, ac_key) self._attr_name = f'{self._ac['name']} MyZone' self._attr_unique_id = ( f'{self.coordinator.data['system']['rid']}-{ac_key}-myzone' ) for zone in instance["coordinator"].data["aircons"][ac_key]["zones"].values(): if zone["type"] > 0: self._name_to_number[zone["name"]] = zone["number"] self._number_to_name[zone["number"]] = zone["name"] self._attr_options.append(zone["name"]) @property def current_option(self): """Return the fresh air status.""" return self._number_to_name[self._ac["myZone"]] async def async_select_option(self, option): """Set the MyZone.""" await self.async_change( {self.ac_key: {"info": {"myZone": self._name_to_number[option]}}} )
"""Select platform for Advantage Air integration.""" from homeassistant.components.select import SelectEntity from .const import DOMAIN as ADVANTAGE_AIR_DOMAIN from .entity import AdvantageAirEntity ADVANTAGE_AIR_INACTIVE = "Inactive" async def async_setup_entry(hass, config_entry, async_add_entities): """Set up AdvantageAir toggle platform.""" instance = hass.data[ADVANTAGE_AIR_DOMAIN][config_entry.entry_id] entities = [] for ac_key in instance["coordinator"].data["aircons"]: entities.append(AdvantageAirMyZone(instance, ac_key)) async_add_entities(entities) class AdvantageAirMyZone(AdvantageAirEntity, SelectEntity): """Representation of Advantage Air MyZone control.""" _attr_icon = "mdi:home-thermometer" _attr_options = [ADVANTAGE_AIR_INACTIVE] _number_to_name = {0: ADVANTAGE_AIR_INACTIVE} _name_to_number = {ADVANTAGE_AIR_INACTIVE: 0} def __init__(self, instance, ac_key): """Initialize an Advantage Air MyZone control.""" super().__init__(instance, ac_key) self._attr_name = f'{self._ac["name"]} MyZone' self._attr_unique_id = ( f'{self.coordinator.data["system"]["rid"]}-{ac_key}-myzone' ) for zone in instance["coordinator"].data["aircons"][ac_key]["zones"].values(): if zone["type"] > 0: self._name_to_number[zone["name"]] = zone["number"] self._number_to_name[zone["number"]] = zone["name"] self._attr_options.append(zone["name"]) @property def current_option(self): """Return the fresh air status.""" return self._number_to_name[self._ac["myZone"]] async def async_select_option(self, option): """Set the MyZone.""" await self.async_change( {self.ac_key: {"info": {"myZone": self._name_to_number[option]}}} )
import boto3 import os from botocore.exceptions import ClientError, NoCredentialsError class AWS: def __init__(self, logger): self.aws_authenticated = False self.set_logger(logger) self._set_account_id() self._set_region() self.get_notification_variables() self.boto3_client_map = dict() def get_notification_variables(self): # Get ASS_AWS_NOTIFICATION_MODE variable from SSM Parameter store. self.set_list_ssmparameters(['ASS_AWS_NOTIFICATION_MODE']) # Get variables depending on the ASS_AWS_NOTIFICATION_MODE variable from SSM Parameter store. if os.environ['NOTIFICATION_MODE'].upper() == "NONE": self.logger.info(f'NOTIFICATION_MODE variable available. Mode:{os.environ['NOTIFICATION_MODE']}') elif os.environ['NOTIFICATION_MODE'].upper() == "JIRA": self.logger.info('Getting Jira variables from Parameter store') self.set_list_ssmparameters(['ASS_AWS_JIRA_USER', 'ASS_AWS_JIRA_API_PASSWORD', 'ASS_AWS_JIRA_URL', 'ASS_AWS_JIRA_PROJECT']) self.logger.info('Jira variables available') elif os.environ['NOTIFICATION_MODE'].upper() == "GOOGLECHAT": self.logger.info('Getting Google Chat variable from Parameter store') self.set_list_ssmparameters(['ASS_AWS_CHATURL']) self.logger.info(f'Google chat variable available.') else: warning = "NOTIFICATION_MODE is unknown!!!" self.logger.warning(warning) raise Exception(warning) self.logger.info(f"Parameters set from SSM Parameter store") def set_list_ssmparameters(self, parameter_list: list): ssm_client = boto3.client('ssm', region_name=self.get_region()) try: response = ssm_client.get_parameters( Names=parameter_list, WithDecryption=True) for parameters in response['Parameters']: key = parameters['Name'] value = parameters['Value'] os.environ[str(key[8:])] = str(value) except Exception as e: self.logger.error(f"Error occurred while getting the objects from the SSM ParameterStore") raise def set_logger(self, logger): if logger.__module__ and logger.__module__ == 'logging': self.logger = logger else: raise Exception("Not a valid logger object") def empty_bucket(self, bucket): s3client = boto3.client('s3', region_name=self.get_region()) bucket_name = bucket['Name'] versioning_status = s3client.get_bucket_versioning(Bucket=bucket_name) try: self.logger.info(f"Connect to bucket {bucket_name}") s3 = boto3.resource('s3') bucket = s3.Bucket(bucket_name) self.logger.info(f"Start deletion of all objects in bucket {bucket_name}") bucket.objects.all().delete() bucket.object_versions.delete() self.logger.info(f"Finished deletion of all objects in bucket {bucket_name}") except AttributeError: self.logger.info(f"{bucket_name} is empty") except ClientError as e: if e.response['Error']['Code'] == 'NoSuchBucket': self.logger.warning(f"Bucket ({bucket_name}) does not exist error when deleting objects, continuing") except Exception: self.logger.error(f"Error occurred while deleting all objects in {bucket_name}") raise def is_aws_authenticated(self): return self.aws_authenticated def s3_has_tag(self, bucket_name, tag_name, tag_value): self.logger.debug(f"Checking bucket {bucket_name} for tag {tag_name} with value {tag_value}") s3_client = boto3.client('s3') try: response = s3_client.get_bucket_tagging(Bucket=bucket_name) self.logger.debug(response) for tag in response['TagSet']: self.logger.debug(tag) if tag['Key'] == tag_name and tag['Value'] == tag_value: self.logger.debug(f"Bucket {bucket_name} has tag {tag_name} with value {tag_value}") return True except ClientError: self.logger.debug(f"No TagSet found or bucket not found for bucket {bucket_name}") return False def 
resource_has_tag(self, client, resource_arn, tag_name, tag_value=None): """ Checks if the resource with arn resource_arn has a tag tag_name. If tag_value is passed, the function returns True if the tag exists and the value matches, False in all other cases. If tag_value is not passed, the function returns the value of the tag if the tag exists, and False in all other cases. :param client: boto3 client :param resource_arn: arn of the resource to check for the tag :param tag_name: the name of the tag to search for :param tag_value: the value of the tag, if not passed or None, the value of the tag is returned. :return: True or False if tag_value is passed The value of the tag or False if tag_value is not passed or None """ self.logger.debug(f"Checking resource {resource_arn} for tag {tag_name} with value {tag_value}") try: response = "" # Checking for type of client if "RDS" in str(client.__class__): self.logger.debug(f"RDS Client detected") response = client.list_tags_for_resource(ResourceName=resource_arn)['TagList'] elif "CloudFront" in str(client.__class__): self.logger.debug(f"Cloudfront Client detected") response = client.list_tags_for_resource(Resource=resource_arn)['Tags']['Items'] else: self.logger.debug(f"Unknown client detected!") self.logger.debug(response) for tag in response: if tag['Key'] == tag_name: if tag_value is not None: if tag['Value'] == tag_value: self.logger.debug(f"Resource {resource_arn} has tag {tag_name} with value {tag["Value"]}") return True else: self.logger.debug(f"Resource {resource_arn} has tag {tag_name} but value {tag["Value"]} " f"does not match {tag_value}") return False else: self.logger.debug(f"Resource {resource_arn} has tag {tag_name} with value {tag["Value"]}") return tag['Value'] except Exception: return False return False def cfn_stack_exists(self, stack_name): try: response = self.get_boto3_client('cloudformation').describe_stacks(StackName=stack_name) if len(response) > 0: if response.get('Stacks')[0].get('StackStatus') in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']: return True except Exception: return False return False def get_boto3_client(self, resource_type, region_name=None): if resource_type not in self.boto3_client_map: if region_name is None: region_name = self.get_region() self.boto3_client_map[resource_type] = boto3.client(resource_type, region_name=region_name) return self.boto3_client_map[resource_type] def get_region(self): return self.region def get_account_id(self): return self.account_id def _set_region(self): try: self.region = boto3.session.Session().region_name self.aws_authenticated = True except NoCredentialsError: self.region = None def _set_account_id(self): try: self.account_id = boto3.client("sts").get_caller_identity()["Account"] self.aws_authenticated = True except NoCredentialsError: self.account_id = "" def create_bucket(self, bucket_name, private_bucket=False): try: self.logger.info(f"Create bucket {bucket_name} if it does not already exist.") s3 = boto3.resource('s3') s3_client = boto3.client('s3') if s3.Bucket(bucket_name) in s3.buckets.all(): self.logger.info(f"Bucket {bucket_name} already exists") else: self.logger.info(f"Start creation of bucket {bucket_name}") s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': self.get_region()}) if private_bucket: s3_client.put_public_access_block( Bucket=bucket_name, PublicAccessBlockConfiguration={ 'BlockPublicAcls': True, 'IgnorePublicAcls': True, 'BlockPublicPolicy': True, 'RestrictPublicBuckets': True }, ) self.logger.info(f"Finished 
creation of bucket {bucket_name}") except Exception: raise def remove_bucket(self, bucket_name): try: self.logger.info(f"Connect to bucket {bucket_name}") s3 = boto3.resource('s3') bucket = s3.Bucket(bucket_name) self.logger.info(f"Start deletion of all objects in bucket {bucket_name}") bucket.objects.all().delete() self.logger.info(f"Start deletion of bucket {bucket_name}") bucket.delete() self.logger.info(f"Finished deletion of bucket {bucket_name}") except Exception: self.logger.error(f"An error occurred while deleting bucket {bucket_name}") raise def backup_bucket(self, origin_bucket_name, backup_bucket_name): try: self.logger.info(f"Connect to bucket {origin_bucket_name}") s3 = boto3.client('s3') s3_resource = boto3.resource('s3') self.logger.info(f"Start backup of all objects in bucket {origin_bucket_name}") # Get all objects bucket = s3_resource.Bucket(origin_bucket_name) objects = bucket.objects.all() for obj in objects: copy_source = {'Bucket': origin_bucket_name, 'Key': obj.key} s3_resource.meta.client.copy(copy_source, backup_bucket_name, f"{origin_bucket_name}/{obj.key}") self.logger.info(f"Finished backup of bucket {origin_bucket_name} to {backup_bucket_name}") except Exception: self.logger.error(f"An error occurred while taking a backup of bucket {origin_bucket_name}") raise def restore_bucket(self, bucket_name, origin_bucket_name): try: self.logger.info(f"Connect to bucket {origin_bucket_name}") s3 = boto3.client('s3') s3_resource = boto3.resource('s3') # Get ACL tag self.logger.info(f"Getting ACL from bucket: {bucket_name}") acl = "" for tag in s3.get_bucket_tagging(Bucket=f"{bucket_name}")['TagSet']: if tag['Key'] == "ass:s3:backup-and-empty-bucket-on-stop-acl": bucket_tag = tag['Value'] acl = {'ACL': f"{bucket_tag}"} else: acl = {'ACL': "private"} # Starting restore self.logger.info(f"Start restore of all objects in bucket {origin_bucket_name}") bucket = s3_resource.Bucket(origin_bucket_name) objects = bucket.objects.all() for obj in objects: # full path (e.g. bucket/folder/test.png) origin_file_key = obj.key # path (e.g. folder/test.png) fn_new_bucket = "/".join(origin_file_key.strip("/").split('/')[1:]) if not origin_file_key.endswith("/"): copy_source = {'Bucket': origin_bucket_name, 'Key': origin_file_key} s3_resource.meta.client.copy(copy_source, bucket_name, fn_new_bucket, acl) s3.delete_object(Bucket=origin_bucket_name, Key=origin_file_key) self.logger.info(f"Finished backup of bucket {origin_bucket_name} to {bucket_name}") except Exception: self.logger.error(f"An error occurred while taking a backup of bucket {origin_bucket_name}") raise
import boto3 import os from botocore.exceptions import ClientError, NoCredentialsError class AWS: def __init__(self, logger): self.aws_authenticated = False self.set_logger(logger) self._set_account_id() self._set_region() self.get_notification_variables() self.boto3_client_map = dict() def get_notification_variables(self): # Get ASS_AWS_NOTIFICATION_MODE variable from SSM Parameter store. self.set_list_ssmparameters(['ASS_AWS_NOTIFICATION_MODE']) # Get variables depending on the ASS_AWS_NOTIFICATION_MODE variable from SSM Parameter store. if os.environ['NOTIFICATION_MODE'].upper() == "NONE": self.logger.info(f'NOTIFICATION_MODE variable available. Mode:{os.environ["NOTIFICATION_MODE"]}') elif os.environ['NOTIFICATION_MODE'].upper() == "JIRA": self.logger.info('Getting Jira variables from Parameter store') self.set_list_ssmparameters(['ASS_AWS_JIRA_USER', 'ASS_AWS_JIRA_API_PASSWORD', 'ASS_AWS_JIRA_URL', 'ASS_AWS_JIRA_PROJECT']) self.logger.info('Jira variables available') elif os.environ['NOTIFICATION_MODE'].upper() == "GOOGLECHAT": self.logger.info('Getting Google Chat variable from Parameter store') self.set_list_ssmparameters(['ASS_AWS_CHATURL']) self.logger.info(f'Google chat variable available.') else: warning = "NOTIFICATION_MODE is unknown!!!" self.logger.warning(warning) raise Exception(warning) self.logger.info(f"Parameters set from SSM Parameter store") def set_list_ssmparameters(self, parameter_list: list): ssm_client = boto3.client('ssm', region_name=self.get_region()) try: response = ssm_client.get_parameters( Names=parameter_list, WithDecryption=True) for parameters in response['Parameters']: key = parameters['Name'] value = parameters['Value'] os.environ[str(key[8:])] = str(value) except Exception as e: self.logger.error(f"Error occurred while getting the objects from the SSM ParameterStore") raise def set_logger(self, logger): if logger.__module__ and logger.__module__ == 'logging': self.logger = logger else: raise Exception("Not a valid logger object") def empty_bucket(self, bucket): s3client = boto3.client('s3', region_name=self.get_region()) bucket_name = bucket['Name'] versioning_status = s3client.get_bucket_versioning(Bucket=bucket_name) try: self.logger.info(f"Connect to bucket {bucket_name}") s3 = boto3.resource('s3') bucket = s3.Bucket(bucket_name) self.logger.info(f"Start deletion of all objects in bucket {bucket_name}") bucket.objects.all().delete() bucket.object_versions.delete() self.logger.info(f"Finished deletion of all objects in bucket {bucket_name}") except AttributeError: self.logger.info(f"{bucket_name} is empty") except ClientError as e: if e.response['Error']['Code'] == 'NoSuchBucket': self.logger.warning(f"Bucket ({bucket_name}) does not exist error when deleting objects, continuing") except Exception: self.logger.error(f"Error occurred while deleting all objects in {bucket_name}") raise def is_aws_authenticated(self): return self.aws_authenticated def s3_has_tag(self, bucket_name, tag_name, tag_value): self.logger.debug(f"Checking bucket {bucket_name} for tag {tag_name} with value {tag_value}") s3_client = boto3.client('s3') try: response = s3_client.get_bucket_tagging(Bucket=bucket_name) self.logger.debug(response) for tag in response['TagSet']: self.logger.debug(tag) if tag['Key'] == tag_name and tag['Value'] == tag_value: self.logger.debug(f"Bucket {bucket_name} has tag {tag_name} with value {tag_value}") return True except ClientError: self.logger.debug(f"No TagSet found or bucket not found for bucket {bucket_name}") return False def 
resource_has_tag(self, client, resource_arn, tag_name, tag_value=None): """ Checks if the resource with arn resource_arn has a tag tag_name. If tag_value is passed, the function returns True if the tag exists and the value matches, False in all other cases. If tag_value is not passed, the function returns the value of the tag if the tag exists, and False in all other cases. :param client: boto3 client :param resource_arn: arn of the resource to check for the tag :param tag_name: the name of the tag to search for :param tag_value: the value of the tag, if not passed or None, the value of the tag is returned. :return: True or False if tag_value is passed The value of the tag or False if tag_value is not passed or None """ self.logger.debug(f"Checking resource {resource_arn} for tag {tag_name} with value {tag_value}") try: response = "" # Checking for type of client if "RDS" in str(client.__class__): self.logger.debug(f"RDS Client detected") response = client.list_tags_for_resource(ResourceName=resource_arn)['TagList'] elif "CloudFront" in str(client.__class__): self.logger.debug(f"Cloudfront Client detected") response = client.list_tags_for_resource(Resource=resource_arn)['Tags']['Items'] else: self.logger.debug(f"Unknown client detected!") self.logger.debug(response) for tag in response: if tag['Key'] == tag_name: if tag_value is not None: if tag['Value'] == tag_value: self.logger.debug(f"Resource {resource_arn} has tag {tag_name} with value {tag['Value']}") return True else: self.logger.debug(f"Resource {resource_arn} has tag {tag_name} but value {tag['Value']} " f"does not match {tag_value}") return False else: self.logger.debug(f"Resource {resource_arn} has tag {tag_name} with value {tag['Value']}") return tag['Value'] except Exception: return False return False def cfn_stack_exists(self, stack_name): try: response = self.get_boto3_client('cloudformation').describe_stacks(StackName=stack_name) if len(response) > 0: if response.get('Stacks')[0].get('StackStatus') in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']: return True except Exception: return False return False def get_boto3_client(self, resource_type, region_name=None): if resource_type not in self.boto3_client_map: if region_name is None: region_name = self.get_region() self.boto3_client_map[resource_type] = boto3.client(resource_type, region_name=region_name) return self.boto3_client_map[resource_type] def get_region(self): return self.region def get_account_id(self): return self.account_id def _set_region(self): try: self.region = boto3.session.Session().region_name self.aws_authenticated = True except NoCredentialsError: self.region = None def _set_account_id(self): try: self.account_id = boto3.client("sts").get_caller_identity()["Account"] self.aws_authenticated = True except NoCredentialsError: self.account_id = "" def create_bucket(self, bucket_name, private_bucket=False): try: self.logger.info(f"Create bucket {bucket_name} if it does not already exist.") s3 = boto3.resource('s3') s3_client = boto3.client('s3') if s3.Bucket(bucket_name) in s3.buckets.all(): self.logger.info(f"Bucket {bucket_name} already exists") else: self.logger.info(f"Start creation of bucket {bucket_name}") s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': self.get_region()}) if private_bucket: s3_client.put_public_access_block( Bucket=bucket_name, PublicAccessBlockConfiguration={ 'BlockPublicAcls': True, 'IgnorePublicAcls': True, 'BlockPublicPolicy': True, 'RestrictPublicBuckets': True }, ) self.logger.info(f"Finished 
creation of bucket {bucket_name}") except Exception: raise def remove_bucket(self, bucket_name): try: self.logger.info(f"Connect to bucket {bucket_name}") s3 = boto3.resource('s3') bucket = s3.Bucket(bucket_name) self.logger.info(f"Start deletion of all objects in bucket {bucket_name}") bucket.objects.all().delete() self.logger.info(f"Start deletion of bucket {bucket_name}") bucket.delete() self.logger.info(f"Finished deletion of bucket {bucket_name}") except Exception: self.logger.error(f"An error occurred while deleting bucket {bucket_name}") raise def backup_bucket(self, origin_bucket_name, backup_bucket_name): try: self.logger.info(f"Connect to bucket {origin_bucket_name}") s3 = boto3.client('s3') s3_resource = boto3.resource('s3') self.logger.info(f"Start backup of all objects in bucket {origin_bucket_name}") # Get all objects bucket = s3_resource.Bucket(origin_bucket_name) objects = bucket.objects.all() for obj in objects: copy_source = {'Bucket': origin_bucket_name, 'Key': obj.key} s3_resource.meta.client.copy(copy_source, backup_bucket_name, f"{origin_bucket_name}/{obj.key}") self.logger.info(f"Finished backup of bucket {origin_bucket_name} to {backup_bucket_name}") except Exception: self.logger.error(f"An error occurred while taking a backup of bucket {origin_bucket_name}") raise def restore_bucket(self, bucket_name, origin_bucket_name): try: self.logger.info(f"Connect to bucket {origin_bucket_name}") s3 = boto3.client('s3') s3_resource = boto3.resource('s3') # Get ACL tag self.logger.info(f"Getting ACL from bucket: {bucket_name}") acl = "" for tag in s3.get_bucket_tagging(Bucket=f"{bucket_name}")['TagSet']: if tag['Key'] == "ass:s3:backup-and-empty-bucket-on-stop-acl": bucket_tag = tag['Value'] acl = {'ACL': f"{bucket_tag}"} else: acl = {'ACL': "private"} # Starting restore self.logger.info(f"Start restore of all objects in bucket {origin_bucket_name}") bucket = s3_resource.Bucket(origin_bucket_name) objects = bucket.objects.all() for obj in objects: # full path (e.g. bucket/folder/test.png) origin_file_key = obj.key # path (e.g. folder/test.png) fn_new_bucket = "/".join(origin_file_key.strip("/").split('/')[1:]) if not origin_file_key.endswith("/"): copy_source = {'Bucket': origin_bucket_name, 'Key': origin_file_key} s3_resource.meta.client.copy(copy_source, bucket_name, fn_new_bucket, acl) s3.delete_object(Bucket=origin_bucket_name, Key=origin_file_key) self.logger.info(f"Finished backup of bucket {origin_bucket_name} to {bucket_name}") except Exception: self.logger.error(f"An error occurred while taking a backup of bucket {origin_bucket_name}") raise
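For the AWS helper above (both copies), construction is side-effectful: __init__ calls STS for the account id, reads the region from the boto3 session, and pulls the ASS_AWS_* parameters from SSM, so valid credentials are required up front. set_logger only accepts a standard logging logger (it checks logger.__module__ == 'logging'). A minimal usage sketch, with a hypothetical bucket and tag name:

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("ass")  # plain logging.Logger, as set_logger() requires

aws = AWS(log)  # fetches notification settings from SSM during construction
if aws.is_aws_authenticated():
    # Bucket and tag names below are made up for illustration.
    if aws.s3_has_tag("my-example-bucket", "ass:s3:empty-bucket-on-stop", "true"):
        aws.empty_bucket({"Name": "my-example-bucket"})  # expects a dict with a 'Name' key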
import os import json import time import logging import functools from typing import List, Any, Optional, Callable, Union, Tuple, Dict from web3 import Web3 from web3.eth import Contract from web3.contract import ContractFunction from web3.types import ( TxParams, Wei, Address, ChecksumAddress, ENS, Nonce, HexBytes, ) ETH_ADDRESS = "0x0000000000000000000000000000000000000000" logger = logging.getLogger(__name__) AddressLike = Union[Address, ChecksumAddress, ENS] class InvalidToken(Exception): def __init__(self, address: Any) -> None: Exception.__init__(self, f"Invalid token address: {address}") class InsufficientBalance(Exception): def __init__(self, had: int, needed: int) -> None: Exception.__init__(self, f"Insufficient balance. Had {had}, needed {needed}") def _load_abi(name: str) -> str: path = f"{os.path.dirname(os.path.abspath(__file__))}/assets/" with open(os.path.abspath(path + f"{name}.abi")) as f: abi: str = json.load(f) return abi def check_approval(method: Callable) -> Callable: """Decorator to check if user is approved for a token. It approves them if they need to be approved.""" @functools.wraps(method) def approved(self: Any, *args: Any) -> Any: # Check to see if the first token is actually ETH token = args[0] if args[0] != ETH_ADDRESS else None token_two = None # Check second token, if needed if method.__name__ == "make_trade" or method.__name__ == "make_trade_output": token_two = args[1] if args[1] != ETH_ADDRESS else None # Approve both tokens, if needed if token: is_approved = self._is_approved(token) if not is_approved: self.approve(token) if token_two: is_approved = self._is_approved(token_two) if not is_approved: self.approve(token_two) return method(self, *args) return approved def supports(versions: List[int]) -> Callable: def g(f: Callable) -> Callable: @functools.wraps(f) def check_version(self: "Uniswap", *args: List, **kwargs: Dict) -> Any: if self.version not in versions: raise Exception( "Function does not support version of Uniswap passed to constructor" ) return f(self, *args, **kwargs) return check_version return g def _str_to_addr(s: str) -> AddressLike: if s.startswith("0x"): return Address(bytes.fromhex(s[2:])) elif s.endswith(".ens"): return ENS(s) else: raise Exception(f"Couldn't convert string {s} to AddressLike") def _addr_to_str(a: AddressLike) -> str: if isinstance(a, bytes): # Address or ChecksumAddress addr: str = Web3.toChecksumAddress("0x" + bytes(a).hex()) return addr elif isinstance(a, str): if a.endswith(".ens"): # Address is ENS raise Exception("ENS not supported for this operation") elif a.startswith("0x"): addr = Web3.toChecksumAddress(a) return addr else: raise InvalidToken(a) def _validate_address(a: AddressLike) -> None: assert _addr_to_str(a) _netid_to_name = {1: "mainnet", 4: "rinkeby"} class Uniswap: def __init__( self, address: Union[str, AddressLike], private_key: str, provider: Optional[str] = None, web3: Optional[Web3] = None, version: int = 1, max_slippage: float = 0.1, ) -> None: self.address: AddressLike = _str_to_addr(address) if isinstance( address, str ) else address self.private_key = private_key self.version = version # TODO: Write tests for slippage self.max_slippage = max_slippage if web3: self.w3 = web3 else: # Initialize web3. Extra provider for testing. 
self.provider = provider or os.environ["PROVIDER"] self.w3 = Web3( Web3.HTTPProvider(self.provider, request_kwargs={"timeout": 60}) ) netid = int(self.w3.net.version) if netid in _netid_to_name: self.network = _netid_to_name[netid] else: raise Exception(f"Unknown netid: {netid}") logger.info(f"Using {self.w3} ('{self.network}')") self.last_nonce: Nonce = self.w3.eth.getTransactionCount(self.address) # This code automatically approves you for trading on the exchange. # max_approval is to allow the contract to exchange on your behalf. # max_approval_check checks that current approval is above a reasonable number # The program cannot check for max_approval each time because it decreases # with each trade. self.max_approval_hex = f"0x{64 * "f"}" self.max_approval_int = int(self.max_approval_hex, 16) self.max_approval_check_hex = f"0x{15 * "0"}{49 * "f"}" self.max_approval_check_int = int(self.max_approval_check_hex, 16) if self.version == 1: factory_contract_addresses = { "mainnet": "0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95", "ropsten": "0x9c83dCE8CA20E9aAF9D3efc003b2ea62aBC08351", "rinkeby": "0xf5D915570BC477f9B8D6C0E980aA81757A3AaC36", "kovan": "0xD3E51Ef092B2845f10401a0159B2B96e8B6c3D30", "görli": "0x6Ce570d02D73d4c384b46135E87f8C592A8c86dA", } self.factory_contract = self._load_contract( abi_name="uniswap-v1/factory", address=_str_to_addr(factory_contract_addresses[self.network]), ) elif self.version == 2: # For v2 the address is the same on mainnet, Ropsten, Rinkeby, Görli, and Kovan # https://uniswap.org/docs/v2/smart-contracts/factory factory_contract_address_v2 = _str_to_addr( "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f" ) self.factory_contract = self._load_contract( abi_name="uniswap-v2/factory", address=factory_contract_address_v2, ) self.router_address: AddressLike = _str_to_addr( "0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D" ) """Documented here: https://uniswap.org/docs/v2/smart-contracts/router02/""" self.router = self._load_contract( abi_name="uniswap-v2/router02", address=self.router_address, ) else: raise Exception("Invalid version, only 1 or 2 supported") logger.info(f"Using factory contract: {self.factory_contract}") @supports([1]) def get_all_tokens(self) -> List[dict]: # FIXME: This is a very expensive operation, would benefit greatly from caching tokenCount = self.factory_contract.functions.tokenCount().call() tokens = [] for i in range(tokenCount): address = self.factory_contract.functions.getTokenWithId(i).call() if address == "0x0000000000000000000000000000000000000000": # Token is ETH continue token = self.get_token(address) tokens.append(token) return tokens @supports([1]) def get_token(self, address: AddressLike) -> dict: # FIXME: This function should always return the same output for the same input # and would therefore benefit from caching token_contract = self._load_contract(abi_name="erc20", address=address) try: symbol = token_contract.functions.symbol().call() name = token_contract.functions.name().call() except Exception as e: logger.warning( f"Exception occurred while trying to get token {_addr_to_str(address)}: {e}" ) raise InvalidToken(address) return {"name": name, "symbol": symbol} @supports([1]) def exchange_address_from_token(self, token_addr: AddressLike) -> AddressLike: ex_addr: AddressLike = self.factory_contract.functions.getExchange( token_addr ).call() # TODO: What happens if the token doesn't have an exchange/doesn't exist? 
Should probably raise an Exception (and test it) return ex_addr @supports([1]) def token_address_from_exchange(self, exchange_addr: AddressLike) -> Address: token_addr: Address = ( self.exchange_contract(ex_addr=exchange_addr) .functions.tokenAddress(exchange_addr) .call() ) return token_addr @functools.lru_cache() @supports([1]) def exchange_contract( self, token_addr: AddressLike = None, ex_addr: AddressLike = None ) -> Contract: if not ex_addr and token_addr: ex_addr = self.exchange_address_from_token(token_addr) if ex_addr is None: raise InvalidToken(token_addr) abi_name = "uniswap-v1/exchange" contract = self._load_contract(abi_name=abi_name, address=ex_addr) logger.info(f"Loaded exchange contract {contract} at {contract.address}") return contract @functools.lru_cache() def erc20_contract(self, token_addr: AddressLike) -> Contract: return self._load_contract(abi_name="erc20", address=token_addr) @supports([2]) def get_weth_address(self) -> Address: address: Address = self.router.functions.WETH().call() return address def _load_contract(self, abi_name: str, address: AddressLike) -> Contract: return self.w3.eth.contract(address=address, abi=_load_abi(abi_name)) # ------ Exchange ------------------------------------------------------------------ @supports([1, 2]) def get_fee_maker(self) -> float: """Get the maker fee.""" return 0 @supports([1, 2]) def get_fee_taker(self) -> float: """Get the taker fee.""" return 0.003 # ------ Market -------------------------------------------------------------------- @supports([1, 2]) def get_eth_token_input_price(self, token: AddressLike, qty: Wei) -> Wei: """Public price for ETH to Token trades with an exact input.""" if self.version == 1: ex = self.exchange_contract(token) price: Wei = ex.functions.getEthToTokenInputPrice(qty).call() elif self.version == 2: price = self.router.functions.getAmountsOut( qty, [self.get_weth_address(), token] ).call()[-1] return price @supports([1, 2]) def get_token_eth_input_price(self, token: AddressLike, qty: int) -> int: """Public price for token to ETH trades with an exact input.""" if self.version == 1: ex = self.exchange_contract(token) price: int = ex.functions.getTokenToEthInputPrice(qty).call() else: price = self.router.functions.getAmountsOut( qty, [token, self.get_weth_address()] ).call()[-1] return price @supports([2]) def get_token_token_input_price( self, token0: AddressLike, token1: AddressLike, qty: int ) -> int: """Public price for token to token trades with an exact input.""" price: int = self.router.functions.getAmountsOut( qty, [token0, self.get_weth_address(), token1] ).call()[-1] return price @supports([1, 2]) def get_eth_token_output_price(self, token: AddressLike, qty: int) -> Wei: """Public price for ETH to Token trades with an exact output.""" if self.version == 1: ex = self.exchange_contract(token) price: Wei = ex.functions.getEthToTokenOutputPrice(qty).call() else: price = self.router.functions.getAmountsIn( qty, [self.get_weth_address(), token] ).call()[0] return price @supports([1, 2]) def get_token_eth_output_price(self, token: AddressLike, qty: Wei) -> int: """Public price for token to ETH trades with an exact output.""" if self.version == 1: ex = self.exchange_contract(token) price: int = ex.functions.getTokenToEthOutputPrice(qty).call() else: price = self.router.functions.getAmountsIn( qty, [token, self.get_weth_address()] ).call()[0] return price @supports([2]) def get_token_token_output_price( self, token0: AddressLike, token1: AddressLike, qty: int ) -> int: """Public price for token 
to token trades with an exact output.""" price: int = self.router.functions.getAmountsIn( qty, [token0, self.get_weth_address(), token1] ).call()[0] return price # ------ Wallet balance ------------------------------------------------------------ def get_eth_balance(self) -> Wei: """Get the balance of ETH in a wallet.""" return self.w3.eth.getBalance(self.address) def get_token_balance(self, token: AddressLike) -> int: """Get the balance of a token in a wallet.""" _validate_address(token) if _addr_to_str(token) == ETH_ADDRESS: return self.get_eth_balance() erc20 = self.erc20_contract(token) balance: int = erc20.functions.balanceOf(self.address).call() return balance # ------ ERC20 Pool ---------------------------------------------------------------- @supports([1]) def get_ex_eth_balance(self, token: AddressLike) -> int: """Get the balance of ETH in an exchange contract.""" ex_addr: AddressLike = self.exchange_address_from_token(token) return self.w3.eth.getBalance(ex_addr) @supports([1]) def get_ex_token_balance(self, token: AddressLike) -> int: """Get the balance of a token in an exchange contract.""" erc20 = self.erc20_contract(token) balance: int = erc20.functions.balanceOf( self.exchange_address_from_token(token) ).call() return balance # TODO: ADD TOTAL SUPPLY @supports([1]) def get_exchange_rate(self, token: AddressLike) -> float: """Get the current ETH/token exchange rate of the token.""" eth_reserve = self.get_ex_eth_balance(token) token_reserve = self.get_ex_token_balance(token) return float(token_reserve / eth_reserve) # ------ Liquidity ----------------------------------------------------------------- @supports([1]) @check_approval def add_liquidity( self, token: AddressLike, max_eth: Wei, min_liquidity: int = 1 ) -> HexBytes: """Add liquidity to the pool.""" tx_params = self._get_tx_params(max_eth) # Add 1 to avoid rounding errors, per # https://hackmd.io/hthz9hXKQmSyXfMbPsut1g#Add-Liquidity-Calculations max_token = int(max_eth * self.get_exchange_rate(token)) + 10 func_params = [min_liquidity, max_token, self._deadline()] function = self.exchange_contract(token).functions.addLiquidity(*func_params) return self._build_and_send_tx(function, tx_params) @supports([1]) @check_approval def remove_liquidity(self, token: str, max_token: int) -> HexBytes: """Remove liquidity from the pool.""" func_params = [int(max_token), 1, 1, self._deadline()] function = self.exchange_contract(token).functions.removeLiquidity(*func_params) return self._build_and_send_tx(function) # ------ Make Trade ---------------------------------------------------------------- @check_approval def make_trade( self, input_token: AddressLike, output_token: AddressLike, qty: Union[int, Wei], recipient: AddressLike = None, ) -> HexBytes: """Make a trade by defining the qty of the input token.""" if input_token == ETH_ADDRESS: return self._eth_to_token_swap_input(output_token, Wei(qty), recipient) else: balance = self.get_token_balance(input_token) if balance < qty: raise InsufficientBalance(balance, qty) if output_token == ETH_ADDRESS: return self._token_to_eth_swap_input(input_token, qty, recipient) else: return self._token_to_token_swap_input( input_token, qty, output_token, recipient ) @check_approval def make_trade_output( self, input_token: AddressLike, output_token: AddressLike, qty: Union[int, Wei], recipient: AddressLike = None, ) -> HexBytes: """Make a trade by defining the qty of the output token.""" if input_token == ETH_ADDRESS: balance = self.get_eth_balance() need = 
self.get_eth_token_output_price(output_token, qty) if balance < need: raise InsufficientBalance(balance, need) return self._eth_to_token_swap_output(output_token, qty, recipient) else: if output_token == ETH_ADDRESS: qty = Wei(qty) return self._token_to_eth_swap_output(input_token, qty, recipient) else: return self._token_to_token_swap_output( input_token, qty, output_token, recipient ) def _eth_to_token_swap_input( self, output_token: AddressLike, qty: Wei, recipient: Optional[AddressLike] ) -> HexBytes: """Convert ETH to tokens given an input amount.""" eth_balance = self.get_eth_balance() if qty > eth_balance: raise InsufficientBalance(eth_balance, qty) if self.version == 1: token_funcs = self.exchange_contract(output_token).functions tx_params = self._get_tx_params(qty) func_params: List[Any] = [qty, self._deadline()] if not recipient: function = token_funcs.ethToTokenSwapInput(*func_params) else: func_params.append(recipient) function = token_funcs.ethToTokenTransferInput(*func_params) return self._build_and_send_tx(function, tx_params) else: if recipient is None: recipient = self.address amount_out_min = int( (1 - self.max_slippage) * self.get_eth_token_input_price(output_token, qty) ) return self._build_and_send_tx( self.router.functions.swapExactETHForTokens( amount_out_min, [self.get_weth_address(), output_token], recipient, self._deadline(), ), self._get_tx_params(qty), ) def _token_to_eth_swap_input( self, input_token: AddressLike, qty: int, recipient: Optional[AddressLike] ) -> HexBytes: """Convert tokens to ETH given an input amount.""" # Balance check input_balance = self.get_token_balance(input_token) cost = self.get_token_eth_input_price(input_token, qty) if cost > input_balance: raise InsufficientBalance(input_balance, cost) if self.version == 1: token_funcs = self.exchange_contract(input_token).functions func_params: List[Any] = [qty, 1, self._deadline()] if not recipient: function = token_funcs.tokenToEthSwapInput(*func_params) else: func_params.append(recipient) function = token_funcs.tokenToEthTransferInput(*func_params) return self._build_and_send_tx(function) else: if recipient is None: recipient = self.address return self._build_and_send_tx( self.router.functions.swapExactTokensForETH( qty, int((1 - self.max_slippage) * cost), [input_token, self.get_weth_address()], recipient, self._deadline(), ), ) def _token_to_token_swap_input( self, input_token: AddressLike, qty: int, output_token: AddressLike, recipient: Optional[AddressLike], ) -> HexBytes: """Convert tokens to tokens given an input amount.""" if self.version == 1: token_funcs = self.exchange_contract(input_token).functions # TODO: This might not be correct min_tokens_bought, min_eth_bought = self._calculate_max_input_token( input_token, qty, output_token ) func_params = [ qty, min_tokens_bought, min_eth_bought, self._deadline(), output_token, ] if not recipient: function = token_funcs.tokenToTokenSwapInput(*func_params) else: func_params.insert(len(func_params) - 1, recipient) function = token_funcs.tokenToTokenTransferInput(*func_params) return self._build_and_send_tx(function) else: if recipient is None: recipient = self.address min_tokens_bought = int( (1 - self.max_slippage) * self.get_token_token_input_price(input_token, output_token, qty) ) return self._build_and_send_tx( self.router.functions.swapExactTokensForTokens( qty, min_tokens_bought, [input_token, self.get_weth_address(), output_token], recipient, self._deadline(), ), ) def _eth_to_token_swap_output( self, output_token: AddressLike, qty: int, 
recipient: Optional[AddressLike] ) -> HexBytes: """Convert ETH to tokens given an output amount.""" if self.version == 1: token_funcs = self.exchange_contract(output_token).functions eth_qty = self.get_eth_token_output_price(output_token, qty) tx_params = self._get_tx_params(eth_qty) func_params: List[Any] = [qty, self._deadline()] if not recipient: function = token_funcs.ethToTokenSwapOutput(*func_params) else: func_params.append(recipient) function = token_funcs.ethToTokenTransferOutput(*func_params) return self._build_and_send_tx(function, tx_params) else: if recipient is None: recipient = self.address eth_qty = self.get_eth_token_output_price(output_token, qty) return self._build_and_send_tx( self.router.functions.swapETHForExactTokens( qty, [self.get_weth_address(), output_token], recipient, self._deadline(), ), self._get_tx_params(eth_qty), ) def _token_to_eth_swap_output( self, input_token: AddressLike, qty: Wei, recipient: Optional[AddressLike] ) -> HexBytes: """Convert tokens to ETH given an output amount.""" # Balance check input_balance = self.get_token_balance(input_token) cost = self.get_token_eth_output_price(input_token, qty) if cost > input_balance: raise InsufficientBalance(input_balance, cost) if self.version == 1: token_funcs = self.exchange_contract(input_token).functions # From https://uniswap.org/docs/v1/frontend-integration/trade-tokens/ # Is all this really necessary? Can't we just use `cost` for max_tokens? outputAmount = qty inputReserve = self.get_ex_token_balance(input_token) outputReserve = self.get_ex_eth_balance(input_token) numerator = outputAmount * inputReserve * 1000 denominator = (outputReserve - outputAmount) * 997 inputAmount = numerator / denominator + 1 max_tokens = int((1 + self.max_slippage) * inputAmount) func_params: List[Any] = [qty, max_tokens, self._deadline()] if not recipient: function = token_funcs.tokenToEthSwapOutput(*func_params) else: func_params.append(recipient) function = token_funcs.tokenToEthTransferOutput(*func_params) return self._build_and_send_tx(function) else: max_tokens = int((1 + self.max_slippage) * cost) return self._build_and_send_tx( self.router.functions.swapTokensForExactETH( qty, max_tokens, [input_token, self.get_weth_address()], self.address, self._deadline(), ), ) def _token_to_token_swap_output( self, input_token: AddressLike, qty: int, output_token: AddressLike, recipient: Optional[AddressLike], ) -> HexBytes: """Convert tokens to tokens given an output amount.""" if self.version == 1: token_funcs = self.exchange_contract(input_token).functions max_tokens_sold, max_eth_sold = self._calculate_max_input_token( input_token, qty, output_token ) tx_params = self._get_tx_params() func_params = [ qty, max_tokens_sold, max_eth_sold, self._deadline(), output_token, ] if not recipient: function = token_funcs.tokenToTokenSwapOutput(*func_params) else: func_params.insert(len(func_params) - 1, recipient) function = token_funcs.tokenToTokenTransferOutput(*func_params) return self._build_and_send_tx(function, tx_params) else: cost = self.get_token_token_output_price(input_token, output_token, qty) amount_in_max = int((1 + self.max_slippage) * cost) return self._build_and_send_tx( self.router.functions.swapTokensForExactTokens( qty, amount_in_max, [input_token, self.get_weth_address(), output_token], self.address, self._deadline(), ), ) # ------ Approval Utils ------------------------------------------------------------ def approve(self, token: AddressLike, max_approval: Optional[int] = None) -> None: """Give an exchange/router 
max approval of a token.""" max_approval = self.max_approval_int if not max_approval else max_approval contract_addr = ( self.exchange_address_from_token(token) if self.version == 1 else self.router_address ) function = self.erc20_contract(token).functions.approve( contract_addr, max_approval ) logger.info(f"Approving {_addr_to_str(token)}...") tx = self._build_and_send_tx(function) self.w3.eth.waitForTransactionReceipt(tx, timeout=6000) # Add extra sleep to let tx propogate correctly time.sleep(1) def _is_approved(self, token: AddressLike) -> bool: """Check to see if the exchange and token is approved.""" _validate_address(token) if self.version == 1: contract_addr = self.exchange_address_from_token(token) else: contract_addr = self.router_address amount = ( self.erc20_contract(token) .functions.allowance(self.address, contract_addr) .call() ) if amount >= self.max_approval_check_int: return True else: return False # ------ Tx Utils ------------------------------------------------------------------ def _deadline(self) -> int: """Get a predefined deadline. 10min by default (same as the Uniswap SDK).""" return int(time.time()) + 10 * 60 def _build_and_send_tx( self, function: ContractFunction, tx_params: Optional[TxParams] = None ) -> HexBytes: """Build and send a transaction.""" if not tx_params: tx_params = self._get_tx_params() transaction = function.buildTransaction(tx_params) signed_txn = self.w3.eth.account.sign_transaction( transaction, private_key=self.private_key ) # TODO: This needs to get more complicated if we want to support replacing a transaction # FIXME: This does not play nice if transactions are sent from other places using the same wallet. try: return self.w3.eth.sendRawTransaction(signed_txn.rawTransaction) finally: logger.debug(f"nonce: {tx_params["nonce"]}") self.last_nonce = Nonce(tx_params["nonce"] + 1) def _get_tx_params(self, value: Wei = Wei(0), gas: Wei = Wei(250000)) -> TxParams: """Get generic transaction parameters.""" return { "from": _addr_to_str(self.address), "value": value, "gas": gas, "nonce": max( self.last_nonce, self.w3.eth.getTransactionCount(self.address) ), } # ------ Price Calculation Utils --------------------------------------------------- def _calculate_max_input_token( self, input_token: AddressLike, qty: int, output_token: AddressLike ) -> Tuple[int, int]: """ For buy orders (exact output), the cost (input) is calculated. Calculate the max input and max eth sold for a token to token output swap. Equation from: - https://hackmd.io/hthz9hXKQmSyXfMbPsut1g - https://uniswap.org/docs/v1/frontend-integration/trade-tokens/ """ # Buy TokenB with ETH output_amount_b = qty input_reserve_b = self.get_ex_eth_balance(output_token) output_reserve_b = self.get_ex_token_balance(output_token) # Cost numerator_b = output_amount_b * input_reserve_b * 1000 denominator_b = (output_reserve_b - output_amount_b) * 997 input_amount_b = numerator_b / denominator_b + 1 # Buy ETH with TokenA output_amount_a = input_amount_b input_reserve_a = self.get_ex_token_balance(input_token) output_reserve_a = self.get_ex_eth_balance(input_token) # Cost numerator_a = output_amount_a * input_reserve_a * 1000 denominator_a = (output_reserve_a - output_amount_a) * 997 input_amount_a = numerator_a / denominator_a - 1 return int(input_amount_a), int(1.2 * input_amount_b) def _calculate_max_output_token( self, output_token: AddressLike, qty: int, input_token: AddressLike ) -> Tuple[int, int]: """ For sell orders (exact input), the amount bought (output) is calculated. 
Similar to _calculate_max_input_token, but for an exact input swap. """ # TokenA (ERC20) to ETH conversion inputAmountA = qty inputReserveA = self.get_ex_token_balance(input_token) outputReserveA = self.get_ex_eth_balance(input_token) # Cost numeratorA = inputAmountA * outputReserveA * 997 denominatorA = inputReserveA * 1000 + inputAmountA * 997 outputAmountA = numeratorA / denominatorA # ETH to TokenB conversion inputAmountB = outputAmountA inputReserveB = self.get_ex_token_balance(output_token) outputReserveB = self.get_ex_eth_balance(output_token) # Cost numeratorB = inputAmountB * outputReserveB * 997 denominatorB = inputReserveB * 1000 + inputAmountB * 997 outputAmountB = numeratorB / denominatorB return int(outputAmountB), int(1.2 * outputAmountA) # ------ Test utilities ------------------------------------------------------------ def _buy_test_assets(self) -> None: """ Buys some BAT and DAI. Used in testing. """ ONE_ETH = 1 * 10 ** 18 TEST_AMT = int(0.1 * ONE_ETH) tokens = self._get_token_addresses() for token_name in ["BAT", "DAI"]: token_addr = tokens[token_name.lower()] price = self.get_eth_token_output_price(_str_to_addr(token_addr), TEST_AMT) logger.info(f"Cost of {TEST_AMT} {token_name}: {price}") logger.info("Buying...") tx = self.make_trade_output( tokens["eth"], tokens[token_name.lower()], TEST_AMT ) self.w3.eth.waitForTransactionReceipt(tx) def _get_token_addresses(self) -> Dict[str, str]: """ Returns a dict with addresses for tokens for the current net. Used in testing. """ netid = int(self.w3.net.version) netname = _netid_to_name[netid] if netname == "mainnet": return { "eth": "0x0000000000000000000000000000000000000000", "bat": Web3.toChecksumAddress( "0x0D8775F648430679A709E98d2b0Cb6250d2887EF" ), "dai": Web3.toChecksumAddress( "0x6b175474e89094c44da98b954eedeac495271d0f" ), } elif netname == "rinkeby": return { "eth": "0x0000000000000000000000000000000000000000", "bat": "0xDA5B056Cfb861282B4b59d29c9B395bcC238D29B", "dai": "0x2448eE2641d78CC42D7AD76498917359D961A783", } else: raise Exception(f"Unknown net '{netname}'")
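The reserve arithmetic in _calculate_max_input_token and _calculate_max_output_token above is Uniswap v1's constant-product pricing with the 0.3% fee folded in as the 997/1000 factors. A small worked example of the exact-input leg, with made-up reserves:

# Exact-input formula from _calculate_max_output_token, hypothetical reserve numbers.
input_amount = 1_000        # tokens sold into the pool
input_reserve = 1_000_000   # pool's token reserve (made up)
output_reserve = 500        # pool's ETH reserve (made up)
# Only 997/1000 of the input counts toward the swap (0.3% fee).
numerator = input_amount * output_reserve * 997
denominator = input_reserve * 1000 + input_amount * 997
output_amount = numerator / denominator
print(output_amount)  # ~0.498 ETH, a bit under the fee-free quote of ~0.4995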
import os import json import time import logging import functools from typing import List, Any, Optional, Callable, Union, Tuple, Dict from web3 import Web3 from web3.eth import Contract from web3.contract import ContractFunction from web3.types import ( TxParams, Wei, Address, ChecksumAddress, ENS, Nonce, HexBytes, ) ETH_ADDRESS = "0x0000000000000000000000000000000000000000" logger = logging.getLogger(__name__) AddressLike = Union[Address, ChecksumAddress, ENS] class InvalidToken(Exception): def __init__(self, address: Any) -> None: Exception.__init__(self, f"Invalid token address: {address}") class InsufficientBalance(Exception): def __init__(self, had: int, needed: int) -> None: Exception.__init__(self, f"Insufficient balance. Had {had}, needed {needed}") def _load_abi(name: str) -> str: path = f"{os.path.dirname(os.path.abspath(__file__))}/assets/" with open(os.path.abspath(path + f"{name}.abi")) as f: abi: str = json.load(f) return abi def check_approval(method: Callable) -> Callable: """Decorator to check if user is approved for a token. It approves them if they need to be approved.""" @functools.wraps(method) def approved(self: Any, *args: Any) -> Any: # Check to see if the first token is actually ETH token = args[0] if args[0] != ETH_ADDRESS else None token_two = None # Check second token, if needed if method.__name__ == "make_trade" or method.__name__ == "make_trade_output": token_two = args[1] if args[1] != ETH_ADDRESS else None # Approve both tokens, if needed if token: is_approved = self._is_approved(token) if not is_approved: self.approve(token) if token_two: is_approved = self._is_approved(token_two) if not is_approved: self.approve(token_two) return method(self, *args) return approved def supports(versions: List[int]) -> Callable: def g(f: Callable) -> Callable: @functools.wraps(f) def check_version(self: "Uniswap", *args: List, **kwargs: Dict) -> Any: if self.version not in versions: raise Exception( "Function does not support version of Uniswap passed to constructor" ) return f(self, *args, **kwargs) return check_version return g def _str_to_addr(s: str) -> AddressLike: if s.startswith("0x"): return Address(bytes.fromhex(s[2:])) elif s.endswith(".ens"): return ENS(s) else: raise Exception(f"Couldn't convert string {s} to AddressLike") def _addr_to_str(a: AddressLike) -> str: if isinstance(a, bytes): # Address or ChecksumAddress addr: str = Web3.toChecksumAddress("0x" + bytes(a).hex()) return addr elif isinstance(a, str): if a.endswith(".ens"): # Address is ENS raise Exception("ENS not supported for this operation") elif a.startswith("0x"): addr = Web3.toChecksumAddress(a) return addr else: raise InvalidToken(a) def _validate_address(a: AddressLike) -> None: assert _addr_to_str(a) _netid_to_name = {1: "mainnet", 4: "rinkeby"} class Uniswap: def __init__( self, address: Union[str, AddressLike], private_key: str, provider: Optional[str] = None, web3: Optional[Web3] = None, version: int = 1, max_slippage: float = 0.1, ) -> None: self.address: AddressLike = _str_to_addr(address) if isinstance( address, str ) else address self.private_key = private_key self.version = version # TODO: Write tests for slippage self.max_slippage = max_slippage if web3: self.w3 = web3 else: # Initialize web3. Extra provider for testing. 
self.provider = provider or os.environ["PROVIDER"] self.w3 = Web3( Web3.HTTPProvider(self.provider, request_kwargs={"timeout": 60}) ) netid = int(self.w3.net.version) if netid in _netid_to_name: self.network = _netid_to_name[netid] else: raise Exception(f"Unknown netid: {netid}") logger.info(f"Using {self.w3} ('{self.network}')") self.last_nonce: Nonce = self.w3.eth.getTransactionCount(self.address) # This code automatically approves you for trading on the exchange. # max_approval is to allow the contract to exchange on your behalf. # max_approval_check checks that current approval is above a reasonable number # The program cannot check for max_approval each time because it decreases # with each trade. self.max_approval_hex = f"0x{64 * 'f'}" self.max_approval_int = int(self.max_approval_hex, 16) self.max_approval_check_hex = f"0x{15 * '0'}{49 * 'f'}" self.max_approval_check_int = int(self.max_approval_check_hex, 16) if self.version == 1: factory_contract_addresses = { "mainnet": "0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95", "ropsten": "0x9c83dCE8CA20E9aAF9D3efc003b2ea62aBC08351", "rinkeby": "0xf5D915570BC477f9B8D6C0E980aA81757A3AaC36", "kovan": "0xD3E51Ef092B2845f10401a0159B2B96e8B6c3D30", "görli": "0x6Ce570d02D73d4c384b46135E87f8C592A8c86dA", } self.factory_contract = self._load_contract( abi_name="uniswap-v1/factory", address=_str_to_addr(factory_contract_addresses[self.network]), ) elif self.version == 2: # For v2 the address is the same on mainnet, Ropsten, Rinkeby, Görli, and Kovan # https://uniswap.org/docs/v2/smart-contracts/factory factory_contract_address_v2 = _str_to_addr( "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f" ) self.factory_contract = self._load_contract( abi_name="uniswap-v2/factory", address=factory_contract_address_v2, ) self.router_address: AddressLike = _str_to_addr( "0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D" ) """Documented here: https://uniswap.org/docs/v2/smart-contracts/router02/""" self.router = self._load_contract( abi_name="uniswap-v2/router02", address=self.router_address, ) else: raise Exception("Invalid version, only 1 or 2 supported") logger.info(f"Using factory contract: {self.factory_contract}") @supports([1]) def get_all_tokens(self) -> List[dict]: # FIXME: This is a very expensive operation, would benefit greatly from caching tokenCount = self.factory_contract.functions.tokenCount().call() tokens = [] for i in range(tokenCount): address = self.factory_contract.functions.getTokenWithId(i).call() if address == "0x0000000000000000000000000000000000000000": # Token is ETH continue token = self.get_token(address) tokens.append(token) return tokens @supports([1]) def get_token(self, address: AddressLike) -> dict: # FIXME: This function should always return the same output for the same input # and would therefore benefit from caching token_contract = self._load_contract(abi_name="erc20", address=address) try: symbol = token_contract.functions.symbol().call() name = token_contract.functions.name().call() except Exception as e: logger.warning( f"Exception occurred while trying to get token {_addr_to_str(address)}: {e}" ) raise InvalidToken(address) return {"name": name, "symbol": symbol} @supports([1]) def exchange_address_from_token(self, token_addr: AddressLike) -> AddressLike: ex_addr: AddressLike = self.factory_contract.functions.getExchange( token_addr ).call() # TODO: What happens if the token doesn't have an exchange/doesn't exist? 
Should probably raise an Exception (and test it) return ex_addr @supports([1]) def token_address_from_exchange(self, exchange_addr: AddressLike) -> Address: token_addr: Address = ( self.exchange_contract(ex_addr=exchange_addr) .functions.tokenAddress(exchange_addr) .call() ) return token_addr @functools.lru_cache() @supports([1]) def exchange_contract( self, token_addr: AddressLike = None, ex_addr: AddressLike = None ) -> Contract: if not ex_addr and token_addr: ex_addr = self.exchange_address_from_token(token_addr) if ex_addr is None: raise InvalidToken(token_addr) abi_name = "uniswap-v1/exchange" contract = self._load_contract(abi_name=abi_name, address=ex_addr) logger.info(f"Loaded exchange contract {contract} at {contract.address}") return contract @functools.lru_cache() def erc20_contract(self, token_addr: AddressLike) -> Contract: return self._load_contract(abi_name="erc20", address=token_addr) @supports([2]) def get_weth_address(self) -> Address: address: Address = self.router.functions.WETH().call() return address def _load_contract(self, abi_name: str, address: AddressLike) -> Contract: return self.w3.eth.contract(address=address, abi=_load_abi(abi_name)) # ------ Exchange ------------------------------------------------------------------ @supports([1, 2]) def get_fee_maker(self) -> float: """Get the maker fee.""" return 0 @supports([1, 2]) def get_fee_taker(self) -> float: """Get the taker fee.""" return 0.003 # ------ Market -------------------------------------------------------------------- @supports([1, 2]) def get_eth_token_input_price(self, token: AddressLike, qty: Wei) -> Wei: """Public price for ETH to Token trades with an exact input.""" if self.version == 1: ex = self.exchange_contract(token) price: Wei = ex.functions.getEthToTokenInputPrice(qty).call() elif self.version == 2: price = self.router.functions.getAmountsOut( qty, [self.get_weth_address(), token] ).call()[-1] return price @supports([1, 2]) def get_token_eth_input_price(self, token: AddressLike, qty: int) -> int: """Public price for token to ETH trades with an exact input.""" if self.version == 1: ex = self.exchange_contract(token) price: int = ex.functions.getTokenToEthInputPrice(qty).call() else: price = self.router.functions.getAmountsOut( qty, [token, self.get_weth_address()] ).call()[-1] return price @supports([2]) def get_token_token_input_price( self, token0: AddressLike, token1: AddressLike, qty: int ) -> int: """Public price for token to token trades with an exact input.""" price: int = self.router.functions.getAmountsOut( qty, [token0, self.get_weth_address(), token1] ).call()[-1] return price @supports([1, 2]) def get_eth_token_output_price(self, token: AddressLike, qty: int) -> Wei: """Public price for ETH to Token trades with an exact output.""" if self.version == 1: ex = self.exchange_contract(token) price: Wei = ex.functions.getEthToTokenOutputPrice(qty).call() else: price = self.router.functions.getAmountsIn( qty, [self.get_weth_address(), token] ).call()[0] return price @supports([1, 2]) def get_token_eth_output_price(self, token: AddressLike, qty: Wei) -> int: """Public price for token to ETH trades with an exact output.""" if self.version == 1: ex = self.exchange_contract(token) price: int = ex.functions.getTokenToEthOutputPrice(qty).call() else: price = self.router.functions.getAmountsIn( qty, [token, self.get_weth_address()] ).call()[0] return price @supports([2]) def get_token_token_output_price( self, token0: AddressLike, token1: AddressLike, qty: int ) -> int: """Public price for token 
to token trades with an exact output.""" price: int = self.router.functions.getAmountsIn( qty, [token0, self.get_weth_address(), token1] ).call()[0] return price # ------ Wallet balance ------------------------------------------------------------ def get_eth_balance(self) -> Wei: """Get the balance of ETH in a wallet.""" return self.w3.eth.getBalance(self.address) def get_token_balance(self, token: AddressLike) -> int: """Get the balance of a token in a wallet.""" _validate_address(token) if _addr_to_str(token) == ETH_ADDRESS: return self.get_eth_balance() erc20 = self.erc20_contract(token) balance: int = erc20.functions.balanceOf(self.address).call() return balance # ------ ERC20 Pool ---------------------------------------------------------------- @supports([1]) def get_ex_eth_balance(self, token: AddressLike) -> int: """Get the balance of ETH in an exchange contract.""" ex_addr: AddressLike = self.exchange_address_from_token(token) return self.w3.eth.getBalance(ex_addr) @supports([1]) def get_ex_token_balance(self, token: AddressLike) -> int: """Get the balance of a token in an exchange contract.""" erc20 = self.erc20_contract(token) balance: int = erc20.functions.balanceOf( self.exchange_address_from_token(token) ).call() return balance # TODO: ADD TOTAL SUPPLY @supports([1]) def get_exchange_rate(self, token: AddressLike) -> float: """Get the current ETH/token exchange rate of the token.""" eth_reserve = self.get_ex_eth_balance(token) token_reserve = self.get_ex_token_balance(token) return float(token_reserve / eth_reserve) # ------ Liquidity ----------------------------------------------------------------- @supports([1]) @check_approval def add_liquidity( self, token: AddressLike, max_eth: Wei, min_liquidity: int = 1 ) -> HexBytes: """Add liquidity to the pool.""" tx_params = self._get_tx_params(max_eth) # Add 1 to avoid rounding errors, per # https://hackmd.io/hthz9hXKQmSyXfMbPsut1g#Add-Liquidity-Calculations max_token = int(max_eth * self.get_exchange_rate(token)) + 10 func_params = [min_liquidity, max_token, self._deadline()] function = self.exchange_contract(token).functions.addLiquidity(*func_params) return self._build_and_send_tx(function, tx_params) @supports([1]) @check_approval def remove_liquidity(self, token: str, max_token: int) -> HexBytes: """Remove liquidity from the pool.""" func_params = [int(max_token), 1, 1, self._deadline()] function = self.exchange_contract(token).functions.removeLiquidity(*func_params) return self._build_and_send_tx(function) # ------ Make Trade ---------------------------------------------------------------- @check_approval def make_trade( self, input_token: AddressLike, output_token: AddressLike, qty: Union[int, Wei], recipient: AddressLike = None, ) -> HexBytes: """Make a trade by defining the qty of the input token.""" if input_token == ETH_ADDRESS: return self._eth_to_token_swap_input(output_token, Wei(qty), recipient) else: balance = self.get_token_balance(input_token) if balance < qty: raise InsufficientBalance(balance, qty) if output_token == ETH_ADDRESS: return self._token_to_eth_swap_input(input_token, qty, recipient) else: return self._token_to_token_swap_input( input_token, qty, output_token, recipient ) @check_approval def make_trade_output( self, input_token: AddressLike, output_token: AddressLike, qty: Union[int, Wei], recipient: AddressLike = None, ) -> HexBytes: """Make a trade by defining the qty of the output token.""" if input_token == ETH_ADDRESS: balance = self.get_eth_balance() need = 
self.get_eth_token_output_price(output_token, qty) if balance < need: raise InsufficientBalance(balance, need) return self._eth_to_token_swap_output(output_token, qty, recipient) else: if output_token == ETH_ADDRESS: qty = Wei(qty) return self._token_to_eth_swap_output(input_token, qty, recipient) else: return self._token_to_token_swap_output( input_token, qty, output_token, recipient ) def _eth_to_token_swap_input( self, output_token: AddressLike, qty: Wei, recipient: Optional[AddressLike] ) -> HexBytes: """Convert ETH to tokens given an input amount.""" eth_balance = self.get_eth_balance() if qty > eth_balance: raise InsufficientBalance(eth_balance, qty) if self.version == 1: token_funcs = self.exchange_contract(output_token).functions tx_params = self._get_tx_params(qty) func_params: List[Any] = [qty, self._deadline()] if not recipient: function = token_funcs.ethToTokenSwapInput(*func_params) else: func_params.append(recipient) function = token_funcs.ethToTokenTransferInput(*func_params) return self._build_and_send_tx(function, tx_params) else: if recipient is None: recipient = self.address amount_out_min = int( (1 - self.max_slippage) * self.get_eth_token_input_price(output_token, qty) ) return self._build_and_send_tx( self.router.functions.swapExactETHForTokens( amount_out_min, [self.get_weth_address(), output_token], recipient, self._deadline(), ), self._get_tx_params(qty), ) def _token_to_eth_swap_input( self, input_token: AddressLike, qty: int, recipient: Optional[AddressLike] ) -> HexBytes: """Convert tokens to ETH given an input amount.""" # Balance check input_balance = self.get_token_balance(input_token) cost = self.get_token_eth_input_price(input_token, qty) if cost > input_balance: raise InsufficientBalance(input_balance, cost) if self.version == 1: token_funcs = self.exchange_contract(input_token).functions func_params: List[Any] = [qty, 1, self._deadline()] if not recipient: function = token_funcs.tokenToEthSwapInput(*func_params) else: func_params.append(recipient) function = token_funcs.tokenToEthTransferInput(*func_params) return self._build_and_send_tx(function) else: if recipient is None: recipient = self.address return self._build_and_send_tx( self.router.functions.swapExactTokensForETH( qty, int((1 - self.max_slippage) * cost), [input_token, self.get_weth_address()], recipient, self._deadline(), ), ) def _token_to_token_swap_input( self, input_token: AddressLike, qty: int, output_token: AddressLike, recipient: Optional[AddressLike], ) -> HexBytes: """Convert tokens to tokens given an input amount.""" if self.version == 1: token_funcs = self.exchange_contract(input_token).functions # TODO: This might not be correct min_tokens_bought, min_eth_bought = self._calculate_max_input_token( input_token, qty, output_token ) func_params = [ qty, min_tokens_bought, min_eth_bought, self._deadline(), output_token, ] if not recipient: function = token_funcs.tokenToTokenSwapInput(*func_params) else: func_params.insert(len(func_params) - 1, recipient) function = token_funcs.tokenToTokenTransferInput(*func_params) return self._build_and_send_tx(function) else: if recipient is None: recipient = self.address min_tokens_bought = int( (1 - self.max_slippage) * self.get_token_token_input_price(input_token, output_token, qty) ) return self._build_and_send_tx( self.router.functions.swapExactTokensForTokens( qty, min_tokens_bought, [input_token, self.get_weth_address(), output_token], recipient, self._deadline(), ), ) def _eth_to_token_swap_output( self, output_token: AddressLike, qty: int, 
recipient: Optional[AddressLike] ) -> HexBytes: """Convert ETH to tokens given an output amount.""" if self.version == 1: token_funcs = self.exchange_contract(output_token).functions eth_qty = self.get_eth_token_output_price(output_token, qty) tx_params = self._get_tx_params(eth_qty) func_params: List[Any] = [qty, self._deadline()] if not recipient: function = token_funcs.ethToTokenSwapOutput(*func_params) else: func_params.append(recipient) function = token_funcs.ethToTokenTransferOutput(*func_params) return self._build_and_send_tx(function, tx_params) else: if recipient is None: recipient = self.address eth_qty = self.get_eth_token_output_price(output_token, qty) return self._build_and_send_tx( self.router.functions.swapETHForExactTokens( qty, [self.get_weth_address(), output_token], recipient, self._deadline(), ), self._get_tx_params(eth_qty), ) def _token_to_eth_swap_output( self, input_token: AddressLike, qty: Wei, recipient: Optional[AddressLike] ) -> HexBytes: """Convert tokens to ETH given an output amount.""" # Balance check input_balance = self.get_token_balance(input_token) cost = self.get_token_eth_output_price(input_token, qty) if cost > input_balance: raise InsufficientBalance(input_balance, cost) if self.version == 1: token_funcs = self.exchange_contract(input_token).functions # From https://uniswap.org/docs/v1/frontend-integration/trade-tokens/ # Is all this really necessary? Can't we just use `cost` for max_tokens? outputAmount = qty inputReserve = self.get_ex_token_balance(input_token) outputReserve = self.get_ex_eth_balance(input_token) numerator = outputAmount * inputReserve * 1000 denominator = (outputReserve - outputAmount) * 997 inputAmount = numerator / denominator + 1 max_tokens = int((1 + self.max_slippage) * inputAmount) func_params: List[Any] = [qty, max_tokens, self._deadline()] if not recipient: function = token_funcs.tokenToEthSwapOutput(*func_params) else: func_params.append(recipient) function = token_funcs.tokenToEthTransferOutput(*func_params) return self._build_and_send_tx(function) else: max_tokens = int((1 + self.max_slippage) * cost) return self._build_and_send_tx( self.router.functions.swapTokensForExactETH( qty, max_tokens, [input_token, self.get_weth_address()], self.address, self._deadline(), ), ) def _token_to_token_swap_output( self, input_token: AddressLike, qty: int, output_token: AddressLike, recipient: Optional[AddressLike], ) -> HexBytes: """Convert tokens to tokens given an output amount.""" if self.version == 1: token_funcs = self.exchange_contract(input_token).functions max_tokens_sold, max_eth_sold = self._calculate_max_input_token( input_token, qty, output_token ) tx_params = self._get_tx_params() func_params = [ qty, max_tokens_sold, max_eth_sold, self._deadline(), output_token, ] if not recipient: function = token_funcs.tokenToTokenSwapOutput(*func_params) else: func_params.insert(len(func_params) - 1, recipient) function = token_funcs.tokenToTokenTransferOutput(*func_params) return self._build_and_send_tx(function, tx_params) else: cost = self.get_token_token_output_price(input_token, output_token, qty) amount_in_max = int((1 + self.max_slippage) * cost) return self._build_and_send_tx( self.router.functions.swapTokensForExactTokens( qty, amount_in_max, [input_token, self.get_weth_address(), output_token], self.address, self._deadline(), ), ) # ------ Approval Utils ------------------------------------------------------------ def approve(self, token: AddressLike, max_approval: Optional[int] = None) -> None: """Give an exchange/router 
        max approval of a token."""
        max_approval = self.max_approval_int if not max_approval else max_approval
        contract_addr = (
            self.exchange_address_from_token(token)
            if self.version == 1
            else self.router_address
        )
        function = self.erc20_contract(token).functions.approve(
            contract_addr, max_approval
        )
        logger.info(f"Approving {_addr_to_str(token)}...")
        tx = self._build_and_send_tx(function)
        self.w3.eth.waitForTransactionReceipt(tx, timeout=6000)

        # Add extra sleep to let tx propagate correctly
        time.sleep(1)

    def _is_approved(self, token: AddressLike) -> bool:
        """Check to see if the exchange and token is approved."""
        _validate_address(token)
        if self.version == 1:
            contract_addr = self.exchange_address_from_token(token)
        else:
            contract_addr = self.router_address
        amount = (
            self.erc20_contract(token)
            .functions.allowance(self.address, contract_addr)
            .call()
        )
        if amount >= self.max_approval_check_int:
            return True
        else:
            return False

    # ------ Tx Utils ------------------------------------------------------------------
    def _deadline(self) -> int:
        """Get a predefined deadline. 10min by default (same as the Uniswap SDK)."""
        return int(time.time()) + 10 * 60

    def _build_and_send_tx(
        self, function: ContractFunction, tx_params: Optional[TxParams] = None
    ) -> HexBytes:
        """Build and send a transaction."""
        if not tx_params:
            tx_params = self._get_tx_params()
        transaction = function.buildTransaction(tx_params)
        signed_txn = self.w3.eth.account.sign_transaction(
            transaction, private_key=self.private_key
        )
        # TODO: This needs to get more complicated if we want to support replacing a transaction
        # FIXME: This does not play nice if transactions are sent from other places using the same wallet.
        try:
            return self.w3.eth.sendRawTransaction(signed_txn.rawTransaction)
        finally:
            logger.debug(f"nonce: {tx_params['nonce']}")
            self.last_nonce = Nonce(tx_params["nonce"] + 1)

    def _get_tx_params(self, value: Wei = Wei(0), gas: Wei = Wei(250000)) -> TxParams:
        """Get generic transaction parameters."""
        return {
            "from": _addr_to_str(self.address),
            "value": value,
            "gas": gas,
            "nonce": max(
                self.last_nonce, self.w3.eth.getTransactionCount(self.address)
            ),
        }

    # ------ Price Calculation Utils ---------------------------------------------------
    def _calculate_max_input_token(
        self, input_token: AddressLike, qty: int, output_token: AddressLike
    ) -> Tuple[int, int]:
        """
        For buy orders (exact output), the cost (input) is calculated.
        Calculate the max input and max eth sold for a token to token output swap.
        Equation from:
         - https://hackmd.io/hthz9hXKQmSyXfMbPsut1g
         - https://uniswap.org/docs/v1/frontend-integration/trade-tokens/
        """
        # Buy TokenB with ETH
        output_amount_b = qty
        input_reserve_b = self.get_ex_eth_balance(output_token)
        output_reserve_b = self.get_ex_token_balance(output_token)

        # Cost
        numerator_b = output_amount_b * input_reserve_b * 1000
        denominator_b = (output_reserve_b - output_amount_b) * 997
        input_amount_b = numerator_b / denominator_b + 1

        # Buy ETH with TokenA
        output_amount_a = input_amount_b
        input_reserve_a = self.get_ex_token_balance(input_token)
        output_reserve_a = self.get_ex_eth_balance(input_token)

        # Cost
        numerator_a = output_amount_a * input_reserve_a * 1000
        denominator_a = (output_reserve_a - output_amount_a) * 997
        input_amount_a = numerator_a / denominator_a - 1

        return int(input_amount_a), int(1.2 * input_amount_b)

    def _calculate_max_output_token(
        self, output_token: AddressLike, qty: int, input_token: AddressLike
    ) -> Tuple[int, int]:
        """
        For sell orders (exact input), the amount bought (output) is calculated.
Similar to _calculate_max_input_token, but for an exact input swap. """ # TokenA (ERC20) to ETH conversion inputAmountA = qty inputReserveA = self.get_ex_token_balance(input_token) outputReserveA = self.get_ex_eth_balance(input_token) # Cost numeratorA = inputAmountA * outputReserveA * 997 denominatorA = inputReserveA * 1000 + inputAmountA * 997 outputAmountA = numeratorA / denominatorA # ETH to TokenB conversion inputAmountB = outputAmountA inputReserveB = self.get_ex_token_balance(output_token) outputReserveB = self.get_ex_eth_balance(output_token) # Cost numeratorB = inputAmountB * outputReserveB * 997 denominatorB = inputReserveB * 1000 + inputAmountB * 997 outputAmountB = numeratorB / denominatorB return int(outputAmountB), int(1.2 * outputAmountA) # ------ Test utilities ------------------------------------------------------------ def _buy_test_assets(self) -> None: """ Buys some BAT and DAI. Used in testing. """ ONE_ETH = 1 * 10 ** 18 TEST_AMT = int(0.1 * ONE_ETH) tokens = self._get_token_addresses() for token_name in ["BAT", "DAI"]: token_addr = tokens[token_name.lower()] price = self.get_eth_token_output_price(_str_to_addr(token_addr), TEST_AMT) logger.info(f"Cost of {TEST_AMT} {token_name}: {price}") logger.info("Buying...") tx = self.make_trade_output( tokens["eth"], tokens[token_name.lower()], TEST_AMT ) self.w3.eth.waitForTransactionReceipt(tx) def _get_token_addresses(self) -> Dict[str, str]: """ Returns a dict with addresses for tokens for the current net. Used in testing. """ netid = int(self.w3.net.version) netname = _netid_to_name[netid] if netname == "mainnet": return { "eth": "0x0000000000000000000000000000000000000000", "bat": Web3.toChecksumAddress( "0x0D8775F648430679A709E98d2b0Cb6250d2887EF" ), "dai": Web3.toChecksumAddress( "0x6b175474e89094c44da98b954eedeac495271d0f" ), } elif netname == "rinkeby": return { "eth": "0x0000000000000000000000000000000000000000", "bat": "0xDA5B056Cfb861282B4b59d29c9B395bcC238D29B", "dai": "0x2448eE2641d78CC42D7AD76498917359D961A783", } else: raise Exception(f"Unknown net '{netname}'")
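The two calculation helpers above encode Uniswap v1's constant-product pricing with the 0.3% fee folded in as 997/1000. A minimal standalone sketch of that math, using invented reserve numbers (the pool sizes below are illustrative only, not from any real exchange):

def get_input_price(input_amount: int, input_reserve: int, output_reserve: int) -> int:
    # Amount bought for an exact input; the 997/1000 factor applies the 0.3% fee.
    input_amount_with_fee = input_amount * 997
    numerator = input_amount_with_fee * output_reserve
    denominator = input_reserve * 1000 + input_amount_with_fee
    return numerator // denominator


def get_output_price(output_amount: int, input_reserve: int, output_reserve: int) -> int:
    # Amount sold for an exact output; the +1 rounds up so the pool is never underpaid,
    # the same reason _calculate_max_input_token adds 1 and a 1.2x headroom factor.
    numerator = output_amount * input_reserve * 1000
    denominator = (output_reserve - output_amount) * 997
    return numerator // denominator + 1


# Illustrative pool: 10 ETH against 20,000 tokens, both in wei-style units.
eth_reserve, token_reserve = 10 * 10 ** 18, 20_000 * 10 ** 18
tokens_bought = get_input_price(10 ** 18, eth_reserve, token_reserve)
eth_needed = get_output_price(tokens_bought, eth_reserve, token_reserve)
print(tokens_bought, eth_needed)  # eth_needed lands within a few wei of the 10 ** 18 spent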
""" Built on https://github.com/ranahaani/GNews/blob/master/gnews/gnews.py """ import re import httpx import logging import feedparser import urllib.request from pathlib import Path from bs4 import BeautifulSoup as Soup from typing import List, Union, Dict from .gnews_utils import AVAILABLE_COUNTRIES, AVAILABLE_LANGUAGES, TOPICS, BASE_URL, USER_AGENT, GOOGLE_NEWS_REGEX, GNewsArticle, GNewsResult, OrJson from .article import Article logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO, datefmt='%m/%d/%Y %I:%M:%S %p') logger = logging.getLogger(__name__) async_httpx = httpx.AsyncClient() def process_url(item, exclude_websites): source = item.get('source').get('href') if not all([not re.match(website, source) for website in [f'^http(s)?://(www.)?{website.lower()}.*' for website in exclude_websites]]): return url = item.get('link') if re.match(GOOGLE_NEWS_REGEX, url): url = httpx.head(url).headers.get('location', url) return url async def async_process_url(item, exclude_websites): source = item.get('source').get('href') if not all([not re.match(website, source) for website in [f'^http(s)?://(www.)?{website.lower()}.*' for website in exclude_websites]]): return url = item.get('link') if re.match(GOOGLE_NEWS_REGEX, url): resp = await async_httpx.head(url) #.headers.get('location', url) url = resp.headers.get('location', url) return url class GNews: """ GNews initialization """ def __init__(self, language="en", country="US", max_results=20, period='30d', exclude_websites=None, proxy=None): self.countries = tuple(AVAILABLE_COUNTRIES), self.languages = tuple(AVAILABLE_LANGUAGES), self._max_results = max_results self._language = language self._country = country self._period = period self._exclude_websites = exclude_websites if exclude_websites and isinstance(exclude_websites, list) else [] self._proxy = {'http': proxy, 'https': proxy} if proxy else None def _ceid(self): if self._period: return f'when%3A{self._period}&ceid={self._country}:{self._language}&hl={self._language}&gl={self._country}' return f'&ceid={self._country}:{self._language}&hl={self._language}&gl={self._country}' @property def language(self): return self._language @language.setter def language(self, language): self._language = AVAILABLE_LANGUAGES.get(language, language) @property def exclude_websites(self): return self._exclude_websites @exclude_websites.setter def exclude_websites(self, exclude_websites): self._exclude_websites = exclude_websites @property def max_results(self): return self._max_results @max_results.setter def max_results(self, size): self._max_results = size @property def period(self): return self._period @period.setter def period(self, period): self._period = period @property def country(self): return self._country @country.setter def country(self, country): self._country = AVAILABLE_COUNTRIES.get(country, country) def get_full_article(self, url): try: article = Article(url=url, language=self._language) article.build() except Exception as error: logger.error(error.args[0]) return None return article async def async_get_full_article(self, url): try: article = Article(url=url, language=self._language) await article.async_build() except Exception as error: logger.error(error.args[0]) return None return article def _clean(self, html): soup = Soup(html, features="html.parser") text = soup.get_text() text = text.replace('\xa0', ' ') return text def _process(self, item): url = process_url(item, self._exclude_websites) if url: title = item.get("title", "") item = { 'title': title, 'description': 
self._clean(item.get("description", "")), 'published_date': item.get("published", ""), 'url': url, 'publisher': item.get("source", {}) } return GNewsResult(**item) async def _async_process(self, item): url = await async_process_url(item, self._exclude_websites) if url: title = item.get("title", "") item = { 'title': title, 'description': self._clean(item.get("description", "")), 'published_date': item.get("published", ""), 'url': url, 'publisher': item.get("source", {}) } return GNewsResult(**item) def get_news(self, key): """ :return: JSON response as nested Python dictionary. """ if key: key = "%20".join(key.split(" ")) url = BASE_URL + '/search?q={}'.format(key) + self._ceid() return self._get_news(url) async def async_get_news(self, key): """ :return: JSON response as nested Python dictionary. """ if key: key = "%20".join(key.split(" ")) url = BASE_URL + '/search?q={}'.format(key) + self._ceid() return await self._async_get_news(url) def get_top_news(self): """ :return: Top News JSON response. """ url = BASE_URL + "?" + self._ceid() return self._get_news(url) async def async_get_top_news(self): """ :return: Top News JSON response. """ url = BASE_URL + "?" + self._ceid() return await self._async_get_news(url) def get_news_by_topic(self, topic: str): f""" :params: TOPIC names i.e {TOPICS} :return: JSON response as nested Python dictionary. """ topic = topic.upper() if topic in TOPICS: url = BASE_URL + '/headlines/section/topic/' + topic + '?' + self._ceid() return self._get_news(url) logger.info(f"Invalid topic. \nAvailable topics are: {", ".join(TOPICS)}.") return [] async def async_get_news_by_topic(self, topic: str): f""" :params: TOPIC names i.e {TOPICS} :return: JSON response as nested Python dictionary. """ topic = topic.upper() if topic in TOPICS: url = BASE_URL + '/headlines/section/topic/' + topic + '?' + self._ceid() return await self._async_get_news(url) logger.info(f"Invalid topic. \nAvailable topics are: {", ".join(TOPICS)}.") return [] def get_news_by_location(self, location: str): """ :params: city/state/country :return: JSON response as nested Python dictionary. """ if location: url = BASE_URL + '/headlines/section/geo/' + location + '?' + self._ceid() return self._get_news(url) logger.warning("Enter a valid location.") return [] async def async_get_news_by_location(self, location: str): """ :params: city/state/country :return: JSON response as nested Python dictionary. """ if location: url = BASE_URL + '/headlines/section/geo/' + location + '?' 
+ self._ceid() return await self._async_get_news(url) logger.warning("Enter a valid location.") return [] def _get_news(self, url): try: if self._proxy: proxy_handler = urllib.request.ProxyHandler(self._proxy) feed_data = feedparser.parse(url, agent=USER_AGENT, handlers=[proxy_handler]) else: feed_data = feedparser.parse(url, agent=USER_AGENT) return [item for item in map(self._process, feed_data.entries[:self._max_results]) if item] except Exception as err: logger.error(err.args[0]) return [] async def _async_get_news(self, url): try: if self._proxy: proxy_handler = urllib.request.ProxyHandler(self._proxy) feed_data = feedparser.parse(url, agent=USER_AGENT, handlers=[proxy_handler]) else: feed_data = feedparser.parse(url, agent=USER_AGENT) rez = [] for i in feed_data.entries[:self._max_results]: # print(i) item = await self._async_process(i) # print(item) if item: rez.append(item) return rez #return [item for item in map(await self._async_process, feed_data.entries[:self._max_results]) if item] except Exception as err: logger.error(err.args[0]) return [] class GNewsCache: api: GNews = None min_wc: int = 350 data: Dict[str, List[Union[GNewsResult, GNewsArticle]]] = {} check: set = set() @classmethod def get_client(cls, **kwargs): if not cls.api: cls.api = GNews(**kwargs) return cls.api @classmethod def add_results(cls, query: str, results: List[Union[GNewsResult, GNewsArticle]]): if not cls.data.get(query): cls.data[query] = [] if results: rez = [r for r in results if r.url not in cls.check] for r in rez: cls.check.add(r.url) cls.data[query].extend(rez) @classmethod async def build_all(cls): """ Builds all items in data into GNewsArticle""" build_items = cls.data.items() for q, items in build_items: logger.info(f'Building {len(items)} items for query:{q}') rez = [await i.async_build() if isinstance(i, GNewsResult) else i for i in items] rez = [i for i in rez if i.num_words >= cls.min_wc] #logger.info(f'Completed {len(rez)} items for query:{q}') cls.data[q] = rez logger.info(f'Completed {len(cls.data[q])} items for query:{q}') @classmethod async def async_get(cls, query: str, **kwargs): c = cls.get_client() rez = await c.async_get_news(key=query) if rez: cls.add_results(query, rez) @classmethod async def async_get_topic(cls, topic: str, **kwargs): c = cls.get_client() rez = await c.async_get_news_by_topic(topic=topic) if rez: cls.add_results(topic, rez) @classmethod async def async_get_top_news(cls, **kwargs): c = cls.get_client() rez = await c.async_get_top_news() if rez: cls.add_results('top_news', rez) @classmethod def get_datalist(cls, filter: List[str] = None) -> List[GNewsArticle]: rez = [] for q, items in cls.data.items(): if not filter or (q in filter): rez.extend(items) return rez @classmethod async def async_dumps(cls, filter: List[str] = None, props: List[str] = None): """ dumps all results into jsonlines format""" await cls.build_all() rez = cls.get_datalist(filter=filter) return '\n'.join(r.dumps(props=props) for r in rez) + '\n' @classmethod async def async_save(cls, filepath: str, filter: List[str] = None, props: List[str] = None): """ saves all results into jsonlines format""" rez = await cls.async_dumps(filter=filter, props=props) p = Path(filepath) with p.open('a', encoding='utf-8') as writer: writer.write(rez) logger.info(f'Saved to {p.as_posix()}') @classmethod def load_from_cache(cls, filepath: str, **kwargs): """ Loads from a jsonlines to construct data""" p = Path(filepath) assert p.exists(), f'{filepath} does not exist' if not cls.data.get('cached'): 
cls.data['cached'] = [] logger.info(f'Starting cache size: {len(cls.data['cached'])}') with p.open('r', encoding='utf-8') as reader: for line in reader: i = OrJson.loads(line) cls.check.add(i['url']) cls.data['cached'].append(GNewsArticle(**i)) logger.info(f'End cache size: {len(cls.data['cached'])}') @classmethod def load_urls_from_cache(cls, filepath: str, **kwargs): """ Loads from a jsonlines to get all the urls""" p = Path(filepath) with p.open('r', encoding='utf-8') as reader: for line in reader: i = OrJson.loads(line) cls.check.add(i['url']) logger.info(f'Total Url Cache: {len(cls.check)}')
""" Built on https://github.com/ranahaani/GNews/blob/master/gnews/gnews.py """ import re import httpx import logging import feedparser import urllib.request from pathlib import Path from bs4 import BeautifulSoup as Soup from typing import List, Union, Dict from .gnews_utils import AVAILABLE_COUNTRIES, AVAILABLE_LANGUAGES, TOPICS, BASE_URL, USER_AGENT, GOOGLE_NEWS_REGEX, GNewsArticle, GNewsResult, OrJson from .article import Article logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO, datefmt='%m/%d/%Y %I:%M:%S %p') logger = logging.getLogger(__name__) async_httpx = httpx.AsyncClient() def process_url(item, exclude_websites): source = item.get('source').get('href') if not all([not re.match(website, source) for website in [f'^http(s)?://(www.)?{website.lower()}.*' for website in exclude_websites]]): return url = item.get('link') if re.match(GOOGLE_NEWS_REGEX, url): url = httpx.head(url).headers.get('location', url) return url async def async_process_url(item, exclude_websites): source = item.get('source').get('href') if not all([not re.match(website, source) for website in [f'^http(s)?://(www.)?{website.lower()}.*' for website in exclude_websites]]): return url = item.get('link') if re.match(GOOGLE_NEWS_REGEX, url): resp = await async_httpx.head(url) #.headers.get('location', url) url = resp.headers.get('location', url) return url class GNews: """ GNews initialization """ def __init__(self, language="en", country="US", max_results=20, period='30d', exclude_websites=None, proxy=None): self.countries = tuple(AVAILABLE_COUNTRIES), self.languages = tuple(AVAILABLE_LANGUAGES), self._max_results = max_results self._language = language self._country = country self._period = period self._exclude_websites = exclude_websites if exclude_websites and isinstance(exclude_websites, list) else [] self._proxy = {'http': proxy, 'https': proxy} if proxy else None def _ceid(self): if self._period: return f'when%3A{self._period}&ceid={self._country}:{self._language}&hl={self._language}&gl={self._country}' return f'&ceid={self._country}:{self._language}&hl={self._language}&gl={self._country}' @property def language(self): return self._language @language.setter def language(self, language): self._language = AVAILABLE_LANGUAGES.get(language, language) @property def exclude_websites(self): return self._exclude_websites @exclude_websites.setter def exclude_websites(self, exclude_websites): self._exclude_websites = exclude_websites @property def max_results(self): return self._max_results @max_results.setter def max_results(self, size): self._max_results = size @property def period(self): return self._period @period.setter def period(self, period): self._period = period @property def country(self): return self._country @country.setter def country(self, country): self._country = AVAILABLE_COUNTRIES.get(country, country) def get_full_article(self, url): try: article = Article(url=url, language=self._language) article.build() except Exception as error: logger.error(error.args[0]) return None return article async def async_get_full_article(self, url): try: article = Article(url=url, language=self._language) await article.async_build() except Exception as error: logger.error(error.args[0]) return None return article def _clean(self, html): soup = Soup(html, features="html.parser") text = soup.get_text() text = text.replace('\xa0', ' ') return text def _process(self, item): url = process_url(item, self._exclude_websites) if url: title = item.get("title", "") item = { 'title': title, 'description': 
self._clean(item.get("description", "")), 'published_date': item.get("published", ""), 'url': url, 'publisher': item.get("source", {}) } return GNewsResult(**item) async def _async_process(self, item): url = await async_process_url(item, self._exclude_websites) if url: title = item.get("title", "") item = { 'title': title, 'description': self._clean(item.get("description", "")), 'published_date': item.get("published", ""), 'url': url, 'publisher': item.get("source", {}) } return GNewsResult(**item) def get_news(self, key): """ :return: JSON response as nested Python dictionary. """ if key: key = "%20".join(key.split(" ")) url = BASE_URL + '/search?q={}'.format(key) + self._ceid() return self._get_news(url) async def async_get_news(self, key): """ :return: JSON response as nested Python dictionary. """ if key: key = "%20".join(key.split(" ")) url = BASE_URL + '/search?q={}'.format(key) + self._ceid() return await self._async_get_news(url) def get_top_news(self): """ :return: Top News JSON response. """ url = BASE_URL + "?" + self._ceid() return self._get_news(url) async def async_get_top_news(self): """ :return: Top News JSON response. """ url = BASE_URL + "?" + self._ceid() return await self._async_get_news(url) def get_news_by_topic(self, topic: str): f""" :params: TOPIC names i.e {TOPICS} :return: JSON response as nested Python dictionary. """ topic = topic.upper() if topic in TOPICS: url = BASE_URL + '/headlines/section/topic/' + topic + '?' + self._ceid() return self._get_news(url) logger.info(f"Invalid topic. \nAvailable topics are: {', '.join(TOPICS)}.") return [] async def async_get_news_by_topic(self, topic: str): f""" :params: TOPIC names i.e {TOPICS} :return: JSON response as nested Python dictionary. """ topic = topic.upper() if topic in TOPICS: url = BASE_URL + '/headlines/section/topic/' + topic + '?' + self._ceid() return await self._async_get_news(url) logger.info(f"Invalid topic. \nAvailable topics are: {', '.join(TOPICS)}.") return [] def get_news_by_location(self, location: str): """ :params: city/state/country :return: JSON response as nested Python dictionary. """ if location: url = BASE_URL + '/headlines/section/geo/' + location + '?' + self._ceid() return self._get_news(url) logger.warning("Enter a valid location.") return [] async def async_get_news_by_location(self, location: str): """ :params: city/state/country :return: JSON response as nested Python dictionary. """ if location: url = BASE_URL + '/headlines/section/geo/' + location + '?' 
+ self._ceid() return await self._async_get_news(url) logger.warning("Enter a valid location.") return [] def _get_news(self, url): try: if self._proxy: proxy_handler = urllib.request.ProxyHandler(self._proxy) feed_data = feedparser.parse(url, agent=USER_AGENT, handlers=[proxy_handler]) else: feed_data = feedparser.parse(url, agent=USER_AGENT) return [item for item in map(self._process, feed_data.entries[:self._max_results]) if item] except Exception as err: logger.error(err.args[0]) return [] async def _async_get_news(self, url): try: if self._proxy: proxy_handler = urllib.request.ProxyHandler(self._proxy) feed_data = feedparser.parse(url, agent=USER_AGENT, handlers=[proxy_handler]) else: feed_data = feedparser.parse(url, agent=USER_AGENT) rez = [] for i in feed_data.entries[:self._max_results]: # print(i) item = await self._async_process(i) # print(item) if item: rez.append(item) return rez #return [item for item in map(await self._async_process, feed_data.entries[:self._max_results]) if item] except Exception as err: logger.error(err.args[0]) return [] class GNewsCache: api: GNews = None min_wc: int = 350 data: Dict[str, List[Union[GNewsResult, GNewsArticle]]] = {} check: set = set() @classmethod def get_client(cls, **kwargs): if not cls.api: cls.api = GNews(**kwargs) return cls.api @classmethod def add_results(cls, query: str, results: List[Union[GNewsResult, GNewsArticle]]): if not cls.data.get(query): cls.data[query] = [] if results: rez = [r for r in results if r.url not in cls.check] for r in rez: cls.check.add(r.url) cls.data[query].extend(rez) @classmethod async def build_all(cls): """ Builds all items in data into GNewsArticle""" build_items = cls.data.items() for q, items in build_items: logger.info(f'Building {len(items)} items for query:{q}') rez = [await i.async_build() if isinstance(i, GNewsResult) else i for i in items] rez = [i for i in rez if i.num_words >= cls.min_wc] #logger.info(f'Completed {len(rez)} items for query:{q}') cls.data[q] = rez logger.info(f'Completed {len(cls.data[q])} items for query:{q}') @classmethod async def async_get(cls, query: str, **kwargs): c = cls.get_client() rez = await c.async_get_news(key=query) if rez: cls.add_results(query, rez) @classmethod async def async_get_topic(cls, topic: str, **kwargs): c = cls.get_client() rez = await c.async_get_news_by_topic(topic=topic) if rez: cls.add_results(topic, rez) @classmethod async def async_get_top_news(cls, **kwargs): c = cls.get_client() rez = await c.async_get_top_news() if rez: cls.add_results('top_news', rez) @classmethod def get_datalist(cls, filter: List[str] = None) -> List[GNewsArticle]: rez = [] for q, items in cls.data.items(): if not filter or (q in filter): rez.extend(items) return rez @classmethod async def async_dumps(cls, filter: List[str] = None, props: List[str] = None): """ dumps all results into jsonlines format""" await cls.build_all() rez = cls.get_datalist(filter=filter) return '\n'.join(r.dumps(props=props) for r in rez) + '\n' @classmethod async def async_save(cls, filepath: str, filter: List[str] = None, props: List[str] = None): """ saves all results into jsonlines format""" rez = await cls.async_dumps(filter=filter, props=props) p = Path(filepath) with p.open('a', encoding='utf-8') as writer: writer.write(rez) logger.info(f'Saved to {p.as_posix()}') @classmethod def load_from_cache(cls, filepath: str, **kwargs): """ Loads from a jsonlines to construct data""" p = Path(filepath) assert p.exists(), f'{filepath} does not exist' if not cls.data.get('cached'): 
cls.data['cached'] = [] logger.info(f'Starting cache size: {len(cls.data["cached"])}') with p.open('r', encoding='utf-8') as reader: for line in reader: i = OrJson.loads(line) cls.check.add(i['url']) cls.data['cached'].append(GNewsArticle(**i)) logger.info(f'End cache size: {len(cls.data["cached"])}') @classmethod def load_urls_from_cache(cls, filepath: str, **kwargs): """ Loads from a jsonlines to get all the urls""" p = Path(filepath) with p.open('r', encoding='utf-8') as reader: for line in reader: i = OrJson.loads(line) cls.check.add(i['url']) logger.info(f'Total Url Cache: {len(cls.check)}')
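A minimal usage sketch tying `GNews` and `GNewsCache` together. The query string and the `news.jsonl` output path are placeholders; live network access is assumed, and `GNewsResult`/`GNewsArticle` come from the `gnews_utils` module imported above:

import asyncio


async def fetch_and_save() -> None:
    # get_client forwards kwargs to GNews() on first use and reuses it afterwards.
    client = GNewsCache.get_client(language="en", country="US", max_results=5, period="7d")
    results = await client.async_get_news("electric vehicles")  # list of GNewsResult
    GNewsCache.add_results("electric vehicles", results)        # dedupes on URL via cls.check
    # Builds each result into a full GNewsArticle, filters by word count, appends jsonlines.
    await GNewsCache.async_save("news.jsonl", filter=["electric vehicles"])


asyncio.run(fetch_and_save())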
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

import os
from dataclasses import dataclass
from pathlib import PurePath
from typing import Sequence

from pants.engine.engine_aware import EngineAwareParameter
from pants.util.dirutil import fast_relpath, longest_dir_prefix
from pants.util.strutil import strip_prefix

# `:` is used as a delimiter already. Others are reserved for possible future needs.
BANNED_CHARS_IN_TARGET_NAME = frozenset(r":!@?/\=")
BANNED_CHARS_IN_GENERATED_NAME = frozenset(r":!@?=")


class InvalidSpecPath(ValueError):
    """Indicate an invalid spec path for `Address`."""


class InvalidTargetName(ValueError):
    """Indicate an invalid target name for `Address`."""


@dataclass(frozen=True)
class AddressInput:
    """A string that has been parsed and normalized using the Address syntax.

    An AddressInput must be resolved into an Address using the engine (which involves inspecting
    disk to determine the types of its components).
    """

    path_component: str
    target_component: str | None = None
    generated_component: str | None = None

    def __post_init__(self):
        if self.target_component is not None or self.path_component == "":
            if not self.target_component:
                raise InvalidTargetName(
                    f"Address spec {self.path_component}:{self.target_component} has no name part."
                )

        # A root is okay.
        if self.path_component == "":
            return
        components = self.path_component.split(os.sep)
        if any(component in (".", "..", "") for component in components):
            raise InvalidSpecPath(
                f"Address spec has un-normalized path part '{self.path_component}'"
            )
        if os.path.isabs(self.path_component):
            raise InvalidSpecPath(
                f"Address spec has absolute path {self.path_component}; expected a path relative "
                "to the build root."
            )

    @classmethod
    def parse(
        cls,
        spec: str,
        relative_to: str | None = None,
        subproject_roots: Sequence[str] | None = None,
    ) -> AddressInput:
        """Parse a string into an AddressInput.

        :param spec: Target address spec.
        :param relative_to: path to use for sibling specs, ie: ':another_in_same_build_family',
            interprets the missing spec_path part as `relative_to`.
        :param subproject_roots: Paths that correspond with embedded build roots under
            the current build root.

        For example:

            some_target(
                name='mytarget',
                dependencies=['path/to/buildfile:targetname'],
            )

        Where `path/to/buildfile:targetname` is the dependent target address spec. If there is no
        target name component, it defaults to the default target in the resulting Address's
        spec_path.

        Optionally, specs can be prefixed with '//' to denote an absolute spec path. This is
        normally not significant except when a spec referring to a root level target is needed
        from deeper in the tree. For example, in `path/to/buildfile/BUILD`:

            some_target(
                name='mytarget',
                dependencies=[':targetname'],
            )

        The `targetname` spec refers to a target defined in `path/to/buildfile/BUILD*`. If instead
        you want to reference `targetname` in a root level BUILD file, use the absolute form.
        For example:

            some_target(
                name='mytarget',
                dependencies=['//:targetname'],
            )

        The spec may be for a generated target: `dir:generator#generated`.

        The spec may be a file, such as `a/b/c.txt`. It may include a relative address spec at the
        end, such as `a/b/c.txt:original` or `a/b/c.txt:../original`, to disambiguate which target
        the file comes from; otherwise, it will be assumed to come from the default target in the
        directory, i.e. a target which leaves off `name`.
""" subproject = ( longest_dir_prefix(relative_to, subproject_roots) if relative_to and subproject_roots else None ) def prefix_subproject(spec_path: str) -> str: if not subproject: return spec_path if spec_path: return os.path.join(subproject, spec_path) return os.path.normpath(subproject) spec_parts = spec.split(":", maxsplit=1) path_component = spec_parts[0] if len(spec_parts) == 1: target_component = None generated_parts = path_component.split("#", maxsplit=1) if len(generated_parts) == 1: generated_component = None else: path_component, generated_component = generated_parts else: generated_parts = spec_parts[1].split("#", maxsplit=1) if len(generated_parts) == 1: target_component = generated_parts[0] generated_component = None else: target_component, generated_component = generated_parts normalized_relative_to = None if relative_to: normalized_relative_to = ( fast_relpath(relative_to, subproject) if subproject else relative_to ) if path_component.startswith("./") and normalized_relative_to: path_component = os.path.join(normalized_relative_to, path_component[2:]) if not path_component and normalized_relative_to: path_component = normalized_relative_to path_component = prefix_subproject(strip_prefix(path_component, "//")) return cls(path_component, target_component, generated_component) def file_to_address(self) -> Address: """Converts to an Address by assuming that the path_component is a file on disk.""" if self.target_component is None: # Use the default target in the same directory as the file. spec_path, relative_file_path = os.path.split(self.path_component) # We validate that this is not a top-level file. We couldn't do this earlier in the # AddressSpec constructor because we weren't sure if the path_spec referred to a file # vs. a directory. if not spec_path: raise InvalidTargetName( "Top-level file specs must include which target they come from, such as " f"`{self.path_component}:original_target`, but {self.path_component} did not " f"have an address." ) return Address(spec_path=spec_path, relative_file_path=relative_file_path) # The target component may be "above" (but not below) the file in the filesystem. # Determine how many levels above the file it is, and validate that the path is relative. parent_count = self.target_component.count(os.path.sep) if parent_count == 0: spec_path, relative_file_path = os.path.split(self.path_component) return Address( spec_path=spec_path, relative_file_path=relative_file_path, target_name=self.target_component, ) expected_prefix = f"..{os.path.sep}" * parent_count if self.target_component[: self.target_component.rfind(os.path.sep) + 1] != expected_prefix: raise InvalidTargetName( "A target may only be defined in a directory containing a file that it owns in " f"the filesystem: `{self.target_component}` is not at-or-above the file " f"`{self.path_component}`." ) # Split the path_component into a spec_path and relative_file_path at the appropriate # position. path_components = self.path_component.split(os.path.sep) if len(path_components) <= parent_count: raise InvalidTargetName( "Targets are addressed relative to the files that they own: " f"`{self.target_component}` is too far above the file `{self.path_component}` to " "be valid." 
) offset = -1 * (parent_count + 1) spec_path = os.path.join(*path_components[:offset]) if path_components[:offset] else "" relative_file_path = os.path.join(*path_components[offset:]) target_name = os.path.basename(self.target_component) return Address(spec_path, relative_file_path=relative_file_path, target_name=target_name) def dir_to_address(self) -> Address: """Converts to an Address by assuming that the path_component is a directory on disk.""" return Address( spec_path=self.path_component, target_name=self.target_component, generated_name=self.generated_component, ) class Address(EngineAwareParameter): """The unique address for a `Target`. Targets explicitly declared in BUILD files use the format `path/to:tgt`, whereas targets generated from other targets use the format `path/to:generator#generated`. """ def __init__( self, spec_path: str, *, target_name: str | None = None, generated_name: str | None = None, relative_file_path: str | None = None, ) -> None: """ :param spec_path: The path from the build root to the directory containing the BUILD file for the target. If the target is generated, this is the path to the generator target. :param target_name: The name of the target. For generated targets, this is the name of its target generator. If the `name` is left off (i.e. the default), set to `None`. :param generated_name: The name of what is generated. You can use a file path if the generated target represents an entity from the file system, such as `a/b/c` or `subdir/f.ext`. :param relative_file_path: The relative path from the spec_path to an addressed file, if any. Because files must always be located below targets that apply metadata to them, this will always be relative. """ self.spec_path = spec_path self.generated_name = generated_name self._relative_file_path = relative_file_path if generated_name: if relative_file_path: raise AssertionError( f"Do not use both `generated_name` ({generated_name}) and " f"`relative_file_path` ({relative_file_path})." ) banned_chars = BANNED_CHARS_IN_GENERATED_NAME & set(generated_name) if banned_chars: raise InvalidTargetName( f"The generated name `{generated_name}` (defined in directory " f"{self.spec_path}, the part after `#`) contains banned characters " f"(`{"`,`".join(banned_chars)}`). Please replace " "these characters with another separator character like `_`, `-`, or `/`." ) # If the target_name is the same as the default name would be, we normalize to None. self._target_name = None if target_name and target_name != os.path.basename(self.spec_path): banned_chars = BANNED_CHARS_IN_TARGET_NAME & set(target_name) if banned_chars: raise InvalidTargetName( f"The target name {target_name} (defined in directory {self.spec_path}) " f"contains banned characters (`{"`,`".join(banned_chars)}`). Please replace " "these characters with another separator character like `_` or `-`." ) self._target_name = target_name self._hash = hash( (self.spec_path, self._target_name, self.generated_name, self._relative_file_path) ) if PurePath(spec_path).name.startswith("BUILD"): raise InvalidSpecPath( f"The address {self.spec} has {PurePath(spec_path).name} as the last part of its " f"path, but BUILD is a reserved name. Please make sure that you did not name any " f"directories BUILD." 
        )

    @property
    def is_generated_target(self) -> bool:
        return self.generated_name is not None or self.is_file_target

    @property
    def is_file_target(self) -> bool:
        return self._relative_file_path is not None

    @property
    def is_default_target(self) -> bool:
        """True if this address refers to the "default" target in the spec_path.

        The default target has a target name equal to the directory name.
        """
        return self._target_name is None

    @property
    def filename(self) -> str:
        if self._relative_file_path is None:
            raise AssertionError(
                f"Only a file Address (`self.is_file_target`) has a filename: {self}"
            )
        return os.path.join(self.spec_path, self._relative_file_path)

    @property
    def target_name(self) -> str:
        if self._target_name is None:
            return os.path.basename(self.spec_path)
        return self._target_name

    @property
    def spec(self) -> str:
        """The canonical string representation of the Address.

        Prepends '//' if the target is at the root, to disambiguate build root level targets
        from "relative" spec notation.

        :API: public
        """
        prefix = "//" if not self.spec_path else ""
        if self._relative_file_path is not None:
            file_portion = f"{prefix}{self.filename}"
            parent_prefix = "../" * self._relative_file_path.count(os.path.sep)
            return (
                file_portion
                if self._target_name is None and not parent_prefix
                else f"{file_portion}:{parent_prefix}{self.target_name}"
            )
        target_portion = f":{self._target_name}" if self._target_name is not None else ""
        generated_portion = f"#{self.generated_name}" if self.generated_name is not None else ""
        return f"{prefix}{self.spec_path}{target_portion}{generated_portion}"

    @property
    def path_safe_spec(self) -> str:
        """
        :API: public
        """
        if self._relative_file_path:
            parent_count = self._relative_file_path.count(os.path.sep)
            parent_prefix = "@" * parent_count if parent_count else "."
            file_portion = f".{self._relative_file_path.replace(os.path.sep, ".")}"
        else:
            parent_prefix = "."
            file_portion = ""
        if parent_prefix == ".":
            target_portion = f"{parent_prefix}{self._target_name}" if self._target_name else ""
        else:
            target_portion = f"{parent_prefix}{self.target_name}"
        generated_portion = (
            f"@{self.generated_name.replace(os.path.sep, ".")}" if self.generated_name else ""
        )
        return f"{self.spec_path.replace(os.path.sep, ".")}{file_portion}{target_portion}{generated_portion}"

    def maybe_convert_to_target_generator(self) -> Address:
        """If this address is generated, convert it to its generator target.

        Otherwise, return itself unmodified.
        """
        if self.is_generated_target:
            return self.__class__(self.spec_path, target_name=self._target_name)
        return self

    def maybe_convert_to_generated_target(self) -> Address:
        """If this address is for a file target, convert it into generated target syntax
        (dir/f.ext:lib -> dir:lib#f.ext).

        Otherwise, return itself unmodified.
        """
        if self.is_file_target:
            return self.__class__(
                self.spec_path,
                target_name=self._target_name,
                generated_name=self._relative_file_path,
            )
        return self

    def __eq__(self, other):
        if not isinstance(other, Address):
            return False
        return (
            self.spec_path == other.spec_path
            and self._target_name == other._target_name
            and self.generated_name == other.generated_name
            and self._relative_file_path == other._relative_file_path
        )

    def __hash__(self):
        return self._hash

    def __repr__(self) -> str:
        return f"Address({self.spec})"

    def __str__(self) -> str:
        return self.spec

    def __lt__(self, other):
        # NB: This ordering is intentional so that we match the spec format:
        # `{spec_path}{relative_file_path}:{tgt_name}#{generated_name}`.
return ( self.spec_path, self._relative_file_path or "", self._target_name or "", self.generated_name or "", ) < ( other.spec_path, other._relative_file_path or "", other._target_name or "", other.generated_name or "", ) def debug_hint(self) -> str: return self.spec @dataclass(frozen=True) class BuildFileAddress: """Represents the address of a type materialized from a BUILD file. TODO: This type should likely be removed in favor of storing this information on Target. """ address: Address # The relative path of the BUILD file this Address came from. rel_path: str
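The spec grammar that `AddressInput.parse` implements in the listing above (split once on `:`, and only then once on `#`; if there is no `:`, the `#` is looked for in the path part) can be checked in isolation. The following is a minimal standalone sketch; the helper name `split_spec` is hypothetical and nothing here imports Pants:

from __future__ import annotations


def split_spec(spec: str) -> tuple[str, str | None, str | None]:
    """Mirror the splitting order used by AddressInput.parse: the target part
    comes after the first ':' and the generated part after the first '#'.
    (Unlike parse, an empty part after '#' maps to None here.)"""
    path, _, rest = spec.partition(":")
    if rest:
        target, _, generated = rest.partition("#")
        return path, target, generated or None
    path, _, generated = path.partition("#")
    return path, None, generated or None


assert split_spec("path/to:tgt") == ("path/to", "tgt", None)
assert split_spec("dir:generator#generated") == ("dir", "generator", "generated")
assert split_spec("dir#generated") == ("dir", None, "generated")
assert split_spec("a/b/c.txt") == ("a/b/c.txt", None, None)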
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

import os
from dataclasses import dataclass
from pathlib import PurePath
from typing import Sequence

from pants.engine.engine_aware import EngineAwareParameter
from pants.util.dirutil import fast_relpath, longest_dir_prefix
from pants.util.strutil import strip_prefix

# `:` is used as a delimiter already. Others are reserved for possible future needs.
BANNED_CHARS_IN_TARGET_NAME = frozenset(r":!@?/\=")
BANNED_CHARS_IN_GENERATED_NAME = frozenset(r":!@?=")


class InvalidSpecPath(ValueError):
    """Indicate an invalid spec path for `Address`."""


class InvalidTargetName(ValueError):
    """Indicate an invalid target name for `Address`."""


@dataclass(frozen=True)
class AddressInput:
    """A string that has been parsed and normalized using the Address syntax.

    An AddressInput must be resolved into an Address using the engine (which involves
    inspecting disk to determine the types of its components).
    """

    path_component: str
    target_component: str | None = None
    generated_component: str | None = None

    def __post_init__(self):
        if self.target_component is not None or self.path_component == "":
            if not self.target_component:
                raise InvalidTargetName(
                    f"Address spec {self.path_component}:{self.target_component} has no name part."
                )

        # A root is okay.
        if self.path_component == "":
            return
        components = self.path_component.split(os.sep)
        if any(component in (".", "..", "") for component in components):
            raise InvalidSpecPath(
                f"Address spec has un-normalized path part '{self.path_component}'"
            )
        if os.path.isabs(self.path_component):
            raise InvalidSpecPath(
                f"Address spec has absolute path {self.path_component}; expected a path relative "
                "to the build root."
            )

    @classmethod
    def parse(
        cls,
        spec: str,
        relative_to: str | None = None,
        subproject_roots: Sequence[str] | None = None,
    ) -> AddressInput:
        """Parse a string into an AddressInput.

        :param spec: Target address spec.
        :param relative_to: path to use for sibling specs, i.e. ':another_in_same_build_family';
            interprets the missing spec_path part as `relative_to`.
        :param subproject_roots: Paths that correspond with embedded build roots under
            the current build root.

        For example:

            some_target(
                name='mytarget',
                dependencies=['path/to/buildfile:targetname'],
            )

        Where `path/to/buildfile:targetname` is the dependent target address spec. If there is no
        target name component, it defaults to the default target in the resulting Address's
        spec_path.

        Optionally, specs can be prefixed with '//' to denote an absolute spec path. This is
        normally not significant except when a spec referring to a root level target is needed
        from deeper in the tree. For example, in `path/to/buildfile/BUILD`:

            some_target(
                name='mytarget',
                dependencies=[':targetname'],
            )

        The `targetname` spec refers to a target defined in `path/to/buildfile/BUILD*`. If
        instead you want to reference `targetname` in a root level BUILD file, use the absolute
        form. For example:

            some_target(
                name='mytarget',
                dependencies=['//:targetname'],
            )

        The spec may be for a generated target: `dir:generator#generated`.

        The spec may be a file, such as `a/b/c.txt`. It may include a relative address spec at
        the end, such as `a/b/c.txt:original` or `a/b/c.txt:../original`, to disambiguate which
        target the file comes from; otherwise, it will be assumed to come from the default
        target in the directory, i.e. a target which leaves off `name`.
        """
        subproject = (
            longest_dir_prefix(relative_to, subproject_roots)
            if relative_to and subproject_roots
            else None
        )

        def prefix_subproject(spec_path: str) -> str:
            if not subproject:
                return spec_path
            if spec_path:
                return os.path.join(subproject, spec_path)
            return os.path.normpath(subproject)

        spec_parts = spec.split(":", maxsplit=1)
        path_component = spec_parts[0]
        if len(spec_parts) == 1:
            target_component = None
            generated_parts = path_component.split("#", maxsplit=1)
            if len(generated_parts) == 1:
                generated_component = None
            else:
                path_component, generated_component = generated_parts
        else:
            generated_parts = spec_parts[1].split("#", maxsplit=1)
            if len(generated_parts) == 1:
                target_component = generated_parts[0]
                generated_component = None
            else:
                target_component, generated_component = generated_parts

        normalized_relative_to = None
        if relative_to:
            normalized_relative_to = (
                fast_relpath(relative_to, subproject) if subproject else relative_to
            )
        if path_component.startswith("./") and normalized_relative_to:
            path_component = os.path.join(normalized_relative_to, path_component[2:])
        if not path_component and normalized_relative_to:
            path_component = normalized_relative_to

        path_component = prefix_subproject(strip_prefix(path_component, "//"))

        return cls(path_component, target_component, generated_component)

    def file_to_address(self) -> Address:
        """Converts to an Address by assuming that the path_component is a file on disk."""
        if self.target_component is None:
            # Use the default target in the same directory as the file.
            spec_path, relative_file_path = os.path.split(self.path_component)
            # We validate that this is not a top-level file. We couldn't do this earlier in the
            # AddressInput constructor because we weren't sure if the path_spec referred to a
            # file vs. a directory.
            if not spec_path:
                raise InvalidTargetName(
                    "Top-level file specs must include which target they come from, such as "
                    f"`{self.path_component}:original_target`, but {self.path_component} did not "
                    f"have an address."
                )
            return Address(spec_path=spec_path, relative_file_path=relative_file_path)

        # The target component may be "above" (but not below) the file in the filesystem.
        # Determine how many levels above the file it is, and validate that the path is relative.
        parent_count = self.target_component.count(os.path.sep)
        if parent_count == 0:
            spec_path, relative_file_path = os.path.split(self.path_component)
            return Address(
                spec_path=spec_path,
                relative_file_path=relative_file_path,
                target_name=self.target_component,
            )

        expected_prefix = f"..{os.path.sep}" * parent_count
        if self.target_component[: self.target_component.rfind(os.path.sep) + 1] != expected_prefix:
            raise InvalidTargetName(
                "A target may only be defined in a directory containing a file that it owns in "
                f"the filesystem: `{self.target_component}` is not at-or-above the file "
                f"`{self.path_component}`."
            )

        # Split the path_component into a spec_path and relative_file_path at the appropriate
        # position.
        path_components = self.path_component.split(os.path.sep)
        if len(path_components) <= parent_count:
            raise InvalidTargetName(
                "Targets are addressed relative to the files that they own: "
                f"`{self.target_component}` is too far above the file `{self.path_component}` to "
                "be valid."
            )
        offset = -1 * (parent_count + 1)
        spec_path = os.path.join(*path_components[:offset]) if path_components[:offset] else ""
        relative_file_path = os.path.join(*path_components[offset:])
        target_name = os.path.basename(self.target_component)
        return Address(spec_path, relative_file_path=relative_file_path, target_name=target_name)

    def dir_to_address(self) -> Address:
        """Converts to an Address by assuming that the path_component is a directory on disk."""
        return Address(
            spec_path=self.path_component,
            target_name=self.target_component,
            generated_name=self.generated_component,
        )


class Address(EngineAwareParameter):
    """The unique address for a `Target`.

    Targets explicitly declared in BUILD files use the format `path/to:tgt`, whereas targets
    generated from other targets use the format `path/to:generator#generated`.
    """

    def __init__(
        self,
        spec_path: str,
        *,
        target_name: str | None = None,
        generated_name: str | None = None,
        relative_file_path: str | None = None,
    ) -> None:
        """
        :param spec_path: The path from the build root to the directory containing the BUILD file
            for the target. If the target is generated, this is the path to the generator target.
        :param target_name: The name of the target. For generated targets, this is the name of
            its target generator. If the `name` is left off (i.e. the default), set to `None`.
        :param generated_name: The name of what is generated. You can use a file path if the
            generated target represents an entity from the file system, such as `a/b/c` or
            `subdir/f.ext`.
        :param relative_file_path: The relative path from the spec_path to an addressed file,
            if any. Because files must always be located below targets that apply metadata to
            them, this will always be relative.
        """
        self.spec_path = spec_path
        self.generated_name = generated_name
        self._relative_file_path = relative_file_path
        if generated_name:
            if relative_file_path:
                raise AssertionError(
                    f"Do not use both `generated_name` ({generated_name}) and "
                    f"`relative_file_path` ({relative_file_path})."
                )
            banned_chars = BANNED_CHARS_IN_GENERATED_NAME & set(generated_name)
            if banned_chars:
                raise InvalidTargetName(
                    f"The generated name `{generated_name}` (defined in directory "
                    f"{self.spec_path}, the part after `#`) contains banned characters "
                    f"(`{'`,`'.join(banned_chars)}`). Please replace "
                    "these characters with another separator character like `_`, `-`, or `/`."
                )

        # If the target_name is the same as the default name would be, we normalize to None.
        self._target_name = None
        if target_name and target_name != os.path.basename(self.spec_path):
            banned_chars = BANNED_CHARS_IN_TARGET_NAME & set(target_name)
            if banned_chars:
                raise InvalidTargetName(
                    f"The target name {target_name} (defined in directory {self.spec_path}) "
                    f"contains banned characters (`{'`,`'.join(banned_chars)}`). Please replace "
                    "these characters with another separator character like `_` or `-`."
                )
            self._target_name = target_name
        self._hash = hash(
            (self.spec_path, self._target_name, self.generated_name, self._relative_file_path)
        )
        if PurePath(spec_path).name.startswith("BUILD"):
            raise InvalidSpecPath(
                f"The address {self.spec} has {PurePath(spec_path).name} as the last part of its "
                f"path, but BUILD is a reserved name. Please make sure that you did not name any "
                f"directories BUILD."
            )

    @property
    def is_generated_target(self) -> bool:
        return self.generated_name is not None or self.is_file_target

    @property
    def is_file_target(self) -> bool:
        return self._relative_file_path is not None

    @property
    def is_default_target(self) -> bool:
        """True if this address refers to the "default" target in the spec_path.

        The default target has a target name equal to the directory name.
        """
        return self._target_name is None

    @property
    def filename(self) -> str:
        if self._relative_file_path is None:
            raise AssertionError(
                f"Only a file Address (`self.is_file_target`) has a filename: {self}"
            )
        return os.path.join(self.spec_path, self._relative_file_path)

    @property
    def target_name(self) -> str:
        if self._target_name is None:
            return os.path.basename(self.spec_path)
        return self._target_name

    @property
    def spec(self) -> str:
        """The canonical string representation of the Address.

        Prepends '//' if the target is at the root, to disambiguate build root level targets
        from "relative" spec notation.

        :API: public
        """
        prefix = "//" if not self.spec_path else ""
        if self._relative_file_path is not None:
            file_portion = f"{prefix}{self.filename}"
            parent_prefix = "../" * self._relative_file_path.count(os.path.sep)
            return (
                file_portion
                if self._target_name is None and not parent_prefix
                else f"{file_portion}:{parent_prefix}{self.target_name}"
            )
        target_portion = f":{self._target_name}" if self._target_name is not None else ""
        generated_portion = f"#{self.generated_name}" if self.generated_name is not None else ""
        return f"{prefix}{self.spec_path}{target_portion}{generated_portion}"

    @property
    def path_safe_spec(self) -> str:
        """A variant of `spec` that is safe to use in file system paths.

        :API: public
        """
        if self._relative_file_path:
            parent_count = self._relative_file_path.count(os.path.sep)
            parent_prefix = "@" * parent_count if parent_count else "."
            file_portion = f".{self._relative_file_path.replace(os.path.sep, '.')}"
        else:
            parent_prefix = "."
            file_portion = ""
        if parent_prefix == ".":
            target_portion = f"{parent_prefix}{self._target_name}" if self._target_name else ""
        else:
            target_portion = f"{parent_prefix}{self.target_name}"
        generated_portion = (
            f"@{self.generated_name.replace(os.path.sep, '.')}" if self.generated_name else ""
        )
        return f"{self.spec_path.replace(os.path.sep, '.')}{file_portion}{target_portion}{generated_portion}"

    def maybe_convert_to_target_generator(self) -> Address:
        """If this address is generated, convert it to its generator target.

        Otherwise, return itself unmodified.
        """
        if self.is_generated_target:
            return self.__class__(self.spec_path, target_name=self._target_name)
        return self

    def maybe_convert_to_generated_target(self) -> Address:
        """If this address is for a file target, convert it into generated target syntax
        (dir/f.ext:lib -> dir:lib#f.ext).

        Otherwise, return itself unmodified.
        """
        if self.is_file_target:
            return self.__class__(
                self.spec_path,
                target_name=self._target_name,
                generated_name=self._relative_file_path,
            )
        return self

    def __eq__(self, other):
        if not isinstance(other, Address):
            return False
        return (
            self.spec_path == other.spec_path
            and self._target_name == other._target_name
            and self.generated_name == other.generated_name
            and self._relative_file_path == other._relative_file_path
        )

    def __hash__(self):
        return self._hash

    def __repr__(self) -> str:
        return f"Address({self.spec})"

    def __str__(self) -> str:
        return self.spec

    def __lt__(self, other):
        # NB: This ordering is intentional so that we match the spec format:
        # `{spec_path}{relative_file_path}:{tgt_name}#{generated_name}`.
        return (
            self.spec_path,
            self._relative_file_path or "",
            self._target_name or "",
            self.generated_name or "",
        ) < (
            other.spec_path,
            other._relative_file_path or "",
            other._target_name or "",
            other.generated_name or "",
        )

    def debug_hint(self) -> str:
        return self.spec


@dataclass(frozen=True)
class BuildFileAddress:
    """Represents the address of a type materialized from a BUILD file.

    TODO: This type should likely be removed in favor of storing this information on Target.
    """

    address: Address
    # The relative path of the BUILD file this Address came from.
    rel_path: str
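The `../` arithmetic in `file_to_address` above is the subtlest part of this file: the number of `../` segments in the target component decides where the path splits into `spec_path` and `relative_file_path`. Below is a standalone sketch of just that split; the helper name is hypothetical and POSIX separators are assumed (the real code uses `os.path.sep`):

import posixpath


def split_file_spec(path_component: str, target_component: str):
    """Mirror the offset math in AddressInput.file_to_address."""
    parent_count = target_component.count("/")
    offset = -1 * (parent_count + 1)
    parts = path_component.split("/")
    spec_path = posixpath.join(*parts[:offset]) if parts[:offset] else ""
    relative_file_path = posixpath.join(*parts[offset:])
    return spec_path, relative_file_path, posixpath.basename(target_component)


# `a/b/c.txt:original` -> the target `original` lives in `a/b`, next to the file.
assert split_file_spec("a/b/c.txt", "original") == ("a/b", "c.txt", "original")
# `a/b/c.txt:../original` -> the target `original` lives one level up, in `a`.
assert split_file_spec("a/b/c.txt", "../original") == ("a", "b/c.txt", "original")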
from data import features
from player import message_output
import random
import time


class Character:
    def __init__(self, params):
        self.name = params['name']
        self.lives = params['lives']
        self.energy = params['energy']
        self.energy_max = params['energy_max']
        self.energy_name = params['energy_name']
        self.energy_potion_name = params['energy_potion_name']
        self.skill_book = params['skill_book']
        self.char_auto = params['char_auto']
        self.char_self = params['char_self']
        self.char_move = params['char_move']
        self.potions = params['item']['potion']
        self.ingredient = params['item']['ingredient']
        self.brewed_potions = []
        self.weapon = params['weapon']
        self.strength = params['strength']
        self.moral = params['moral']
        self.type = params['type']

    # ######################### Methods ###############################
    def place_board(self, len_feld):  # OK
        """Returns a random x and a random y position; used to place the A.I. and the player.

        :param len_feld: side length of the (square) board
        :return: y and x value
        """
        width = len_feld
        height = len_feld
        max_rnd = width * height
        pos = random.randint(0, max_rnd - 1)
        x_pos = (pos % width)
        y_pos = (pos // width)
        return y_pos, x_pos

    # move A.I.
    def auto_move(self, rival_pos, my_pos):  # OK
        """Compares the x and y coordinates of the two A.I.s and returns new x and y
        coordinates for the A.I. whose turn it is.

        :param rival_pos:
        :param my_pos:
        :return: y, x, go - the coordinates x and y plus the direction (e.g. Norden)
        """
        y = my_pos[0]
        x = my_pos[1]
        if y < rival_pos[0] and x < rival_pos[1]:
            y += 1
            x += 1
            go = 'Südwesten'
        elif y > rival_pos[0] and x > rival_pos[1]:
            y -= 1
            x -= 1
            go = 'Nordosten'
        elif y < rival_pos[0] and x > rival_pos[1]:
            y += 1
            x -= 1
            go = 'Südosten'
        elif y > rival_pos[0] and x < rival_pos[1]:
            y -= 1
            x += 1
            go = 'Nordwesten'
        elif y < rival_pos[0] and x == rival_pos[1]:
            y += 1
            go = 'Süden'
        elif y > rival_pos[0] and x == rival_pos[1]:
            y -= 1
            go = 'Norden'
        elif y == rival_pos[0] and x < rival_pos[1]:
            x += 1
            go = 'Osten'
        elif y == rival_pos[0] and x > rival_pos[1]:
            x -= 1
            go = 'Westen'
        else:
            go = 'nicht gefunden'
        return y, x, go

    # move Player
    def self_move_input(self, my_pos, game_field):  # OK
        """Compares the player's x and y position, determines which directions the player
        can move in, and returns the direction key the player entered.

        :param my_pos:
        :param game_field:
        :return: the direction key entered by the player (e.g. 'w' for Norden)

        Details:
              x axis
            y |0|_|_|
              |_|_|_|
              |_|_|_|
        A player standing at x 0 and y 0 can only move in 3 directions;
        at x 1 and y 1 it would be 8 directions.
        On wrong or missing input the function calls itself again (recursively).
        """
        y = my_pos[0]
        x = my_pos[1]
        max_xy = len(game_field) - 1
        richtung = ""
        if y == 0 and x == 0:
            richtung = input('(d) für E, (c) für SE und (x) für S\n')
            wertung = ['d', 'c', 'x']
        elif y == 0 and x == max_xy:
            richtung = input('(x) für S, (y) für SW und (a) für W\n')
            wertung = ['x', 'y', 'a']
        elif y == 0 and 0 < x < max_xy:
            richtung = input('(d) für E, (c) für SE, (x) für S, (y) für SW und (a) für W\n')
            wertung = ['d', 'c', 'x', 'y', 'a']
        elif x == 0 and 0 < y < max_xy:
            richtung = input('(w) für N, (e) für NE, (d) für E, (c) für SE und (x) für S\n')
            wertung = ['w', 'e', 'd', 'c', 'x']
        elif x == 0 and y == max_xy:
            richtung = input('(w) für N, (e) für NE, (d) für E\n')
            wertung = ['w', 'e', 'd']
        elif x == max_xy and 0 < y < max_xy:  # right edge (this case was missing)
            richtung = input('(w) für N, (x) für S, (y) für SW, (a) für W und (q) für NW\n')
            wertung = ['w', 'x', 'y', 'a', 'q']
        elif 0 < x < max_xy and 0 < y < max_xy:
            richtung = input('(w) für N, (e) für NE, (d) für E, (c) für SE\n'
                             '(x) für S, (y) für SW, (a) für W und (q) für NW\n')
            wertung = ['w', 'e', 'd', 'c', 'x', 'y', 'a', 'q']
        elif 0 < x < max_xy and y == max_xy:
            richtung = input('(w) für N, (e) für NE, (d) für E, (a) für W und (q) für NW\n')
            wertung = ['w', 'e', 'd', 'a', 'q']
        elif x == max_xy and y == max_xy:  # was `0 == max_xy`, which only matched a 1x1 board
            richtung = input('(w) für N, (a) für W und (q) für NW\n')
            wertung = ['w', 'a', 'q']
        else:
            richtung = ""
            wertung = "error"
            print("self_move: no position input")  # TEST
        if richtung not in wertung:
            message_output(features['message']['wrong_entry'])
            return self.self_move_input(my_pos, game_field)
        return richtung

    def self_move(self, my_pos, richtung):  # OK
        """Depending on which direction the user entered, the new position is stored in
        x and y and returned.

        :param my_pos: x and y of my position
        :param richtung: provided by the function self_move_input()
        :return: returns y, x and go (the direction)
        """
        y = my_pos[0]
        x = my_pos[1]
        if richtung:
            if richtung == 'w':
                y -= 1
                go = 'Norden'
            elif richtung == 'e':
                y -= 1
                x += 1
                go = 'Nordosten'
            elif richtung == 'd':
                x += 1
                go = 'Osten'
            elif richtung == 'c':
                y += 1
                x += 1
                go = 'Südosten'
            elif richtung == 'x':
                y += 1
                go = 'Süden'
            elif richtung == 'y':
                y += 1
                x -= 1
                go = 'Südwesten'
            elif richtung == 'a':
                x -= 1
                go = 'Westen'
            elif richtung == 'q':
                y -= 1
                x -= 1
                go = 'Nordwesten'
            else:
                go = 'Da ging etwas schief'  # TEST
        else:
            go = 'false'  # TEST
        return y, x, go

    def show_resources(self):  # OK
        """Shows the player's resources (0|10 Holz  0|10 Sonnengras  0|10 Mondstengel).

        :return: None
        """
        message_output(f"{self.ingredient["wood"]}|{self.ingredient["wood" + "_max"]} {features["loot"]["wood"]["name"]}"
                       f" {self.ingredient["sungrass"]}|{self.ingredient["sungrass" + "_max"]} {features["loot"]["sungrass"]["name"]}"
                       f" {self.ingredient["moonstems"]}|{self.ingredient["moonstems" + "_max"]} {features["loot"]["moonstems"]["name"]}")

    def tile_action(self, tile):
        """The two functions resource_collect() and building_action() are dispatched from
        this function to avoid conflicts.

        :param tile:
        :return: None

        Details:
        Checks what is on the tile of the board the user moves to: if it is a resource,
        resource_collect() is called; if it is a mana shrine, building_action().
        This function can be extended further.
        """
        if tile in features['loot']['loot_list']:
            self.resource_collect(tile)
        elif tile in features['building']['building_list']:
            self.building_action(tile)
        else:
            message_output('Hier gibt es nix\n')

    def resource_collect(self, resource):  # OK
        """Harvests resources and prints the messages for the individual harvesting steps
        with a delay of 1 sec.

        :param resource: the resource to harvest (wood, plant, ...)
        :return: loot_count

        Details:
        Every resource has several messages stored in a dict that should be printed, e.g.
            'wood': {
                'name': 'Holz',
                'count_list': [3, 4, 5],
                'message': {
                    'first': 'sieht einen Baum',
                    'input': 'zum ernten drück (e), weitergehn mit space\n',
                    # any input other than e moves onto the resource tile
                    'pick_beginning': 'holt die Axt raus',
                    'pick': 'hack',  # printed 3 to 5 times with a delay of 1 sec
                    'item_falls': 'Baum fällt',
                    'pick_end': 'packt die Axt ein',
                    'loot': 'sammelt'
                }
        Only a maximum amount of each resource can be harvested.
        1* Once that is reached, a message says this resource cannot be harvested any
           more, plus a display like "10|10 <resource name>".
        2* If my resources are below the maximum and the user pressed e, the individual
           harvesting steps are printed and a random number is stored in loot_count:
               loot_list = features['loot'][resource]['count_list']  # a list of numbers
               random.shuffle(loot_list)
               loot_count = loot_list[0]
        3* If my resource + loot_count is at most the maximum, then
           resource = resource + loot_count and two messages are printed, e.g.
               Karina sammelt 2 Mondstengel ein
               Karina hat 2|10 Mondstengel
        4* If my resource is below the maximum and the maximum is below my resource +
           loot_count, then loot_count = maximum - my resource.
        """
        message_output(f"{features["loot"][resource]["message"]["first"]}")
        if self.ingredient[resource] == self.ingredient[resource + '_max']:  # 1*
            message_output(f"Du kannst nicht mehr {features["loot"][resource]["name"]} tragen "
                           f"{self.ingredient[resource]} | {self.ingredient[resource + "_max"]} {features["loot"][resource]["name"]}")
        elif self.ingredient[resource] < self.ingredient[resource + '_max']:  # 2*
            action = input(features['loot'][resource]['message']['input'])
            if action == 'e':
                message_output(f"{features["loot"][resource]["message"]["pick_beginning"]}")
                time.sleep(1)
                count = 0
                rand_number = random.randint(3, 5)
                while count < rand_number:
                    message_output(f"{features["loot"][resource]["message"]["pick"]}")
                    time.sleep(1)
                    count += 1
                message_output(f"{features["loot"][resource]["message"]["item_falls"]}")
                time.sleep(1)
                message_output(f"{features["loot"][resource]["message"]["pick_end"]}")
                time.sleep(1)
                loot_list = features['loot'][resource]['count_list']
                random.shuffle(loot_list)  # NB: shuffles the shared list in `features` in place
                loot_count = loot_list[0]
                if self.ingredient[resource] + loot_count <= self.ingredient[resource + '_max']:  # 3*
                    self.ingredient[resource] += loot_count
                    message_output(f"{self.name} {features["loot"][resource]["message"]["loot"]} {loot_count} "
                                   f"{features["loot"][resource]["name"]} ein")
                    message_output(f"{self.name} hat {self.ingredient[resource]}|{self.ingredient[resource + "_max"]}"
                                   f" {features["loot"][resource]["name"]}")
                    message_output()
                    return loot_count
                elif self.ingredient[resource] < self.ingredient[resource + '_max'] < self.ingredient[resource] + \
                        loot_count:  # 4*
                    loot_count = self.ingredient[resource + '_max'] - self.ingredient[resource]
                    self.ingredient[resource] += loot_count
                    message_output(f"{self.name} {features["loot"][resource]["message"]["loot"]} {loot_count} "
                                   f"{features["loot"][resource]["name"]} ein")
                    message_output(f"{self.name} hat {self.ingredient[resource]}|{self.ingredient[resource + "_max"]}"
                                   f" {features["loot"][resource]["name"]}")
                    message_output()
                    return loot_count

    def building_action(self, building):
        if building in features['building']['building_list']:
            # first building message
            message_output(f"{features["building"][building]["message"]["first"].format(features["building"][building]["resource_coast"]["Holz"])}")
            # check whether enough resources are available
            if features['building'][building]['resource_coast']['Holz'] <= self.ingredient['wood']:
                message_output(f"{features["building"][building]["message"]["resourcen_check"].format(self.ingredient["wood"])}")
                action = input(features['building'][building]['message']['input'])
                if action == 'e':
                    message_output(f"{features["building"][building]["message"]["wood_campfire"]}")
                    self.ingredient['wood'] -= features['building'][building]['resource_coast']['Holz']
                    time.sleep(1)
                    count = 0
                    rand_number = random.randint(7, 10)
                    while count < rand_number:
                        if self.energy_max >= self.energy:
                            message_output(f"{features["building"][building]["message"]["fire"]}")
                            rand_energy = random.randint(8, 14)
                            self.energy += rand_energy
                            message_output(f"{self.energy} {self.energy_name}")
                            time.sleep(1)
                            count += 1
                        elif self.energy_max < self.energy:
                            if rand_number > count:
                                count += 1
                                message_output(f"{features["building"][building]["message"]["fire"]}")
                                time.sleep(1)
                    message_output(f"{features["building"][building]["message"]["item_falls"]}")
                    time.sleep(1)
                    message_output(f"{features["building"][building]["message"]["pick_end"]}")
                    # show HP and MA or STA
                    time.sleep(1)
            else:
                # otherwise print how much is still missing
                need_resource = features['building'][building]['resource_coast']['Holz'] - self.ingredient['wood']
                message_output(f"{features["building"][building]["message"]["need_resourcen"].format(need_resource)}")
        else:
            message_output('Manaschrein ging schief', building)  # test output

    def use_potion(self, potion_name):
        """If the A.I. or the player has potions, one is used to raise mana or stamina and
        is removed from the item list. Additionally checks whether one potion remains
        (singular output: "hat noch einen"), several remain (plural output: "hat noch ...")
        or none remain ("hat keinen").

        :param potion_name:
        :return:
        """
        # check whether potions are available
        if self.potions[potion_name] > 0:
            effect = features["potion"][self.energy_potion_name]["effect"]
            message_output(f'{self.name} {features['potion'][self.energy_potion_name]['message']}')
            self.energy += features["potion"][self.energy_potion_name][effect]  # mana or sta is raised
            self.potions[potion_name] -= 1  # one potion is deducted
        if self.potions[potion_name] > 1:  # singular | plural check
            message_output(f'{self.name} hat noch {self.potions[potion_name]} '
                           f'Flaschen {features['potion'][self.energy_potion_name]['name']['p']}')
        elif self.potions[potion_name] == 1:  # singular | plural check
            message_output(f'{self.name} hat noch einen {features['potion'][self.energy_potion_name]['name']['s']}')
        elif self.potions[potion_name] == 0:
            message_output(f'{self.name} hat keinen {features['potion'][self.energy_potion_name]['name']['s']}')

    def auto_attack(self, enemy):
        """Describes the fight sequence of the two A.I.s.

        :param enemy: the rival A.I.
        :return: gibt die damage zurück (wird zur Anzeige benötigt |trifft mit 10 ..| )

        Details:
        energy is the mana for mage classes and stamina for fighter classes.
        1* First checks whether the energy is below the skill's energy cost; if so, a
           potion is used via use_potion('energypotion') and the energy is refilled by
           a certain amount.
        2* If no potion is left and the energy cost exceeds the energy, the A.I. fights
           with its weapon; the damage is subtracted from the rival A.I.'s life points
           when the life points are greater than or equal to the damage, otherwise the
           enemy's life points are set to 0, so the enemy cannot have negative life
           points.
        3* Like 2*, except the damage comes from the skill attacks; here the energy
           cost is subtracted from the A.I.'s energy.
        """
        damage = 0
        if self.energy < features["skill"][self.skill_book[0]]["energy_cost"]:  # 1*
            self.use_potion('energypotion')
        if self.potions['energypotion'] < 1 and \
                self.energy < features["skill"][self.skill_book[0]]["energy_cost"]:  # 2*
            message_output(f'{self.name} {self.weapon['message']} {enemy.name}')
            damage = 10
            if enemy.lives >= damage:
                enemy.lives -= damage
            else:
                enemy.lives = 0
        if self.energy >= features["skill"][self.skill_book[0]]["energy_cost"]:  # 3*
            message_output(f'{self.name} {features['skill'][self.skill_book[0]]['message']} {enemy.name}')
            damage = (features['skill'][self.skill_book[0]]['dmg'] * self.lives) // 100
            if enemy.lives >= damage:
                enemy.lives -= damage
            else:
                enemy.lives = 0
            self.energy -= features['skill'][self.skill_book[0]]['energy_cost']
        return damage

    def fight(self, enemy):
        pass

    # ######################### Override Methods ###############################
    def __str__(self) -> str:
        return (f'Name: {self.name} '
                f'lives: {self.lives} '
                f'energy: {self.energy} '
                f'weapon: {self.weapon} '
                f'char: {self.char_self}')

    # ######################### Getter Setter ##########################
    # @property
    # def name(self):
    #     return self.__name
    #
    # @name.setter
    # def name(self, name):
    #     self.__name = name
    #
    # @property
    # def lives(self):
    #     return self.__lives
    #
    # @lives.setter
    # def lives(self, lives):
    #     self.__lives = lives
    #
    # @property
    # def strength(self):
    #     return self.__strength
    #
    # @strength.setter
    # def strength(self, strength):
    #     self.__strength = strength
    #
    # @property
    # def char_auto(self):
    #     return self.__char_auto
    #
    # @char_auto.setter
    # def char_auto(self, char_auto):
    #     self.__char_auto = char_auto
    #
    # @property
    # def char_move(self):
    #     return self.__char_move
    #
    # @char_move.setter
    # def char_move(self, char_move):
    #     self.__char_move = char_move
    #
    # @property
    # def potions(self):
    #     return self.__potions
    #
    # @potions.setter
    # def potions(self, potions):
    #     self.__potions = potions
    #
    # @property
    # def brewed_potions(self):
    #     return self.__brewed_potions
    #
    # @brewed_potions.setter
    # def brewed_potions(self, brewed_potions):
    #     self.__brewed_potions = brewed_potions
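A quick standalone check of the index-to-coordinate mapping that `place_board` uses above: a flat index `pos` on a `width * height` board maps to column `pos % width` and row `pos // width`, so every index lands on exactly one valid cell. (Illustrative only; not part of the game code.)

width = height = 3
cells = [(pos // width, pos % width) for pos in range(width * height)]
assert len(set(cells)) == width * height                         # every index hits a distinct cell
assert all(0 <= y < height and 0 <= x < width for y, x in cells)
assert (7 // 3, 7 % 3) == (2, 1)                                 # e.g. index 7 on a 3x3 board -> row 2, column 1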
from data import features
from player import message_output
import random
import time


class Character:
    def __init__(self, params):
        self.name = params['name']
        self.lives = params['lives']
        self.energy = params['energy']
        self.energy_max = params['energy_max']
        self.energy_name = params['energy_name']
        self.energy_potion_name = params['energy_potion_name']
        self.skill_book = params['skill_book']
        self.char_auto = params['char_auto']
        self.char_self = params['char_self']
        self.char_move = params['char_move']
        self.potions = params['item']['potion']
        self.ingredient = params['item']['ingredient']
        self.brewed_potions = []
        self.weapon = params['weapon']
        self.strength = params['strength']
        self.moral = params['moral']
        self.type = params['type']

    # ######################### Methods ###############################
    def place_board(self, len_feld):  # OK
        """Returns a random x and a random y position; used to place the A.I. and the player.

        :param len_feld: side length of the (square) board
        :return: y and x value
        """
        width = len_feld
        height = len_feld
        max_rnd = width * height
        pos = random.randint(0, max_rnd - 1)
        x_pos = (pos % width)
        y_pos = (pos // width)
        return y_pos, x_pos

    # move A.I.
    def auto_move(self, rival_pos, my_pos):  # OK
        """Compares the x and y coordinates of the two A.I.s and returns new x and y
        coordinates for the A.I. whose turn it is.

        :param rival_pos:
        :param my_pos:
        :return: y, x, go - the coordinates x and y plus the direction (e.g. Norden)
        """
        y = my_pos[0]
        x = my_pos[1]
        if y < rival_pos[0] and x < rival_pos[1]:
            y += 1
            x += 1
            go = 'Südwesten'
        elif y > rival_pos[0] and x > rival_pos[1]:
            y -= 1
            x -= 1
            go = 'Nordosten'
        elif y < rival_pos[0] and x > rival_pos[1]:
            y += 1
            x -= 1
            go = 'Südosten'
        elif y > rival_pos[0] and x < rival_pos[1]:
            y -= 1
            x += 1
            go = 'Nordwesten'
        elif y < rival_pos[0] and x == rival_pos[1]:
            y += 1
            go = 'Süden'
        elif y > rival_pos[0] and x == rival_pos[1]:
            y -= 1
            go = 'Norden'
        elif y == rival_pos[0] and x < rival_pos[1]:
            x += 1
            go = 'Osten'
        elif y == rival_pos[0] and x > rival_pos[1]:
            x -= 1
            go = 'Westen'
        else:
            go = 'nicht gefunden'
        return y, x, go

    # move Player
    def self_move_input(self, my_pos, game_field):  # OK
        """Compares the player's x and y position, determines which directions the player
        can move in, and returns the direction key the player entered.

        :param my_pos:
        :param game_field:
        :return: the direction key entered by the player (e.g. 'w' for Norden)

        Details:
              x axis
            y |0|_|_|
              |_|_|_|
              |_|_|_|
        A player standing at x 0 and y 0 can only move in 3 directions;
        at x 1 and y 1 it would be 8 directions.
        On wrong or missing input the function calls itself again (recursively).
        """
        y = my_pos[0]
        x = my_pos[1]
        max_xy = len(game_field) - 1
        richtung = ""
        if y == 0 and x == 0:
            richtung = input('(d) für E, (c) für SE und (x) für S\n')
            wertung = ['d', 'c', 'x']
        elif y == 0 and x == max_xy:
            richtung = input('(x) für S, (y) für SW und (a) für W\n')
            wertung = ['x', 'y', 'a']
        elif y == 0 and 0 < x < max_xy:
            richtung = input('(d) für E, (c) für SE, (x) für S, (y) für SW und (a) für W\n')
            wertung = ['d', 'c', 'x', 'y', 'a']
        elif x == 0 and 0 < y < max_xy:
            richtung = input('(w) für N, (e) für NE, (d) für E, (c) für SE und (x) für S\n')
            wertung = ['w', 'e', 'd', 'c', 'x']
        elif x == 0 and y == max_xy:
            richtung = input('(w) für N, (e) für NE, (d) für E\n')
            wertung = ['w', 'e', 'd']
        elif x == max_xy and 0 < y < max_xy:  # right edge (this case was missing)
            richtung = input('(w) für N, (x) für S, (y) für SW, (a) für W und (q) für NW\n')
            wertung = ['w', 'x', 'y', 'a', 'q']
        elif 0 < x < max_xy and 0 < y < max_xy:
            richtung = input('(w) für N, (e) für NE, (d) für E, (c) für SE\n'
                             '(x) für S, (y) für SW, (a) für W und (q) für NW\n')
            wertung = ['w', 'e', 'd', 'c', 'x', 'y', 'a', 'q']
        elif 0 < x < max_xy and y == max_xy:
            richtung = input('(w) für N, (e) für NE, (d) für E, (a) für W und (q) für NW\n')
            wertung = ['w', 'e', 'd', 'a', 'q']
        elif x == max_xy and y == max_xy:  # was `0 == max_xy`, which only matched a 1x1 board
            richtung = input('(w) für N, (a) für W und (q) für NW\n')
            wertung = ['w', 'a', 'q']
        else:
            richtung = ""
            wertung = "error"
            print("self_move: no position input")  # TEST
        if richtung not in wertung:
            message_output(features['message']['wrong_entry'])
            return self.self_move_input(my_pos, game_field)
        return richtung

    def self_move(self, my_pos, richtung):  # OK
        """Depending on which direction the user entered, the new position is stored in
        x and y and returned.

        :param my_pos: x and y of my position
        :param richtung: provided by the function self_move_input()
        :return: returns y, x and go (the direction)
        """
        y = my_pos[0]
        x = my_pos[1]
        if richtung:
            if richtung == 'w':
                y -= 1
                go = 'Norden'
            elif richtung == 'e':
                y -= 1
                x += 1
                go = 'Nordosten'
            elif richtung == 'd':
                x += 1
                go = 'Osten'
            elif richtung == 'c':
                y += 1
                x += 1
                go = 'Südosten'
            elif richtung == 'x':
                y += 1
                go = 'Süden'
            elif richtung == 'y':
                y += 1
                x -= 1
                go = 'Südwesten'
            elif richtung == 'a':
                x -= 1
                go = 'Westen'
            elif richtung == 'q':
                y -= 1
                x -= 1
                go = 'Nordwesten'
            else:
                go = 'Da ging etwas schief'  # TEST
        else:
            go = 'false'  # TEST
        return y, x, go

    def show_resources(self):  # OK
        """Shows the player's resources (0|10 Holz  0|10 Sonnengras  0|10 Mondstengel).

        :return: None
        """
        message_output(f"{self.ingredient['wood']}|{self.ingredient['wood' + '_max']} {features['loot']['wood']['name']}"
                       f" {self.ingredient['sungrass']}|{self.ingredient['sungrass' + '_max']} {features['loot']['sungrass']['name']}"
                       f" {self.ingredient['moonstems']}|{self.ingredient['moonstems' + '_max']} {features['loot']['moonstems']['name']}")

    def tile_action(self, tile):
        """The two functions resource_collect() and building_action() are dispatched from
        this function to avoid conflicts.

        :param tile:
        :return: None

        Details:
        Checks what is on the tile of the board the user moves to: if it is a resource,
        resource_collect() is called; if it is a mana shrine, building_action().
        This function can be extended further.
        """
        if tile in features['loot']['loot_list']:
            self.resource_collect(tile)
        elif tile in features['building']['building_list']:
            self.building_action(tile)
        else:
            message_output('Hier gibt es nix\n')

    def resource_collect(self, resource):  # OK
        """Harvests resources and prints the messages for the individual harvesting steps
        with a delay of 1 sec.

        :param resource: the resource to harvest (wood, plant, ...)
        :return: loot_count

        Details:
        Every resource has several messages stored in a dict that should be printed, e.g.
            'wood': {
                'name': 'Holz',
                'count_list': [3, 4, 5],
                'message': {
                    'first': 'sieht einen Baum',
                    'input': 'zum ernten drück (e), weitergehn mit space\n',
                    # any input other than e moves onto the resource tile
                    'pick_beginning': 'holt die Axt raus',
                    'pick': 'hack',  # printed 3 to 5 times with a delay of 1 sec
                    'item_falls': 'Baum fällt',
                    'pick_end': 'packt die Axt ein',
                    'loot': 'sammelt'
                }
        Only a maximum amount of each resource can be harvested.
        1* Once that is reached, a message says this resource cannot be harvested any
           more, plus a display like "10|10 <resource name>".
        2* If my resources are below the maximum and the user pressed e, the individual
           harvesting steps are printed and a random number is stored in loot_count:
               loot_list = features['loot'][resource]['count_list']  # a list of numbers
               random.shuffle(loot_list)
               loot_count = loot_list[0]
        3* If my resource + loot_count is at most the maximum, then
           resource = resource + loot_count and two messages are printed, e.g.
               Karina sammelt 2 Mondstengel ein
               Karina hat 2|10 Mondstengel
        4* If my resource is below the maximum and the maximum is below my resource +
           loot_count, then loot_count = maximum - my resource.
        """
        message_output(f"{features['loot'][resource]['message']['first']}")
        if self.ingredient[resource] == self.ingredient[resource + '_max']:  # 1*
            message_output(f"Du kannst nicht mehr {features['loot'][resource]['name']} tragen "
                           f"{self.ingredient[resource]} | {self.ingredient[resource + '_max']} {features['loot'][resource]['name']}")
        elif self.ingredient[resource] < self.ingredient[resource + '_max']:  # 2*
            action = input(features['loot'][resource]['message']['input'])
            if action == 'e':
                message_output(f"{features['loot'][resource]['message']['pick_beginning']}")
                time.sleep(1)
                count = 0
                rand_number = random.randint(3, 5)
                while count < rand_number:
                    message_output(f"{features['loot'][resource]['message']['pick']}")
                    time.sleep(1)
                    count += 1
                message_output(f"{features['loot'][resource]['message']['item_falls']}")
                time.sleep(1)
                message_output(f"{features['loot'][resource]['message']['pick_end']}")
                time.sleep(1)
                loot_list = features['loot'][resource]['count_list']
                random.shuffle(loot_list)  # NB: shuffles the shared list in `features` in place
                loot_count = loot_list[0]
                if self.ingredient[resource] + loot_count <= self.ingredient[resource + '_max']:  # 3*
                    self.ingredient[resource] += loot_count
                    message_output(f"{self.name} {features['loot'][resource]['message']['loot']} {loot_count} "
                                   f"{features['loot'][resource]['name']} ein")
                    message_output(f"{self.name} hat {self.ingredient[resource]}|{self.ingredient[resource + '_max']}"
                                   f" {features['loot'][resource]['name']}")
                    message_output()
                    return loot_count
                elif self.ingredient[resource] < self.ingredient[resource + '_max'] < self.ingredient[resource] + \
                        loot_count:  # 4*
                    loot_count = self.ingredient[resource + '_max'] - self.ingredient[resource]
                    self.ingredient[resource] += loot_count
                    message_output(f"{self.name} {features['loot'][resource]['message']['loot']} {loot_count} "
                                   f"{features['loot'][resource]['name']} ein")
                    message_output(f"{self.name} hat {self.ingredient[resource]}|{self.ingredient[resource + '_max']}"
                                   f" {features['loot'][resource]['name']}")
                    message_output()
                    return loot_count

    def building_action(self, building):
        if building in features['building']['building_list']:
            # first building message
            message_output(f"{features['building'][building]['message']['first'].format(features['building'][building]['resource_coast']['Holz'])}")
            # check whether enough resources are available
            if features['building'][building]['resource_coast']['Holz'] <= self.ingredient['wood']:
                message_output(f"{features['building'][building]['message']['resourcen_check'].format(self.ingredient['wood'])}")
                action = input(features['building'][building]['message']['input'])
                if action == 'e':
                    message_output(f"{features['building'][building]['message']['wood_campfire']}")
                    self.ingredient['wood'] -= features['building'][building]['resource_coast']['Holz']
                    time.sleep(1)
                    count = 0
                    rand_number = random.randint(7, 10)
                    while count < rand_number:
                        if self.energy_max >= self.energy:
                            message_output(f"{features['building'][building]['message']['fire']}")
                            rand_energy = random.randint(8, 14)
                            self.energy += rand_energy
                            message_output(f"{self.energy} {self.energy_name}")
                            time.sleep(1)
                            count += 1
                        elif self.energy_max < self.energy:
                            if rand_number > count:
                                count += 1
                                message_output(f"{features['building'][building]['message']['fire']}")
                                time.sleep(1)
                    message_output(f"{features['building'][building]['message']['item_falls']}")
                    time.sleep(1)
                    message_output(f"{features['building'][building]['message']['pick_end']}")
                    # show HP and MA or STA
                    time.sleep(1)
            else:
                # otherwise print how much is still missing
                need_resource = features['building'][building]['resource_coast']['Holz'] - self.ingredient['wood']
                message_output(f"{features['building'][building]['message']['need_resourcen'].format(need_resource)}")
        else:
            message_output('Manaschrein ging schief', building)  # test output

    def use_potion(self, potion_name):
        """If the A.I. or the player has potions, one is used to raise mana or stamina and
        is removed from the item list. Additionally checks whether one potion remains
        (singular output: "hat noch einen"), several remain (plural output: "hat noch ...")
        or none remain ("hat keinen").

        :param potion_name:
        :return:
        """
        # check whether potions are available
        if self.potions[potion_name] > 0:
            effect = features["potion"][self.energy_potion_name]["effect"]
            message_output(f'{self.name} {features["potion"][self.energy_potion_name]["message"]}')
            self.energy += features["potion"][self.energy_potion_name][effect]  # mana or sta is raised
            self.potions[potion_name] -= 1  # one potion is deducted
        if self.potions[potion_name] > 1:  # singular | plural check
            message_output(f'{self.name} hat noch {self.potions[potion_name]} '
                           f'Flaschen {features["potion"][self.energy_potion_name]["name"]["p"]}')
        elif self.potions[potion_name] == 1:  # singular | plural check
            message_output(f'{self.name} hat noch einen {features["potion"][self.energy_potion_name]["name"]["s"]}')
        elif self.potions[potion_name] == 0:
            message_output(f'{self.name} hat keinen {features["potion"][self.energy_potion_name]["name"]["s"]}')

    def auto_attack(self, enemy):
        """Describes the fight sequence of the two A.I.s.

        :param enemy: the rival A.I.
        :return: gibt die damage zurück (wird zur Anzeige benötigt |trifft mit 10 ..| )

        Details:
        energy is the mana for mage classes and stamina for fighter classes.
        1* First checks whether the energy is below the skill's energy cost; if so, a
           potion is used via use_potion('energypotion') and the energy is refilled by
           a certain amount.
        2* If no potion is left and the energy cost exceeds the energy, the A.I. fights
           with its weapon; the damage is subtracted from the rival A.I.'s life points
           when the life points are greater than or equal to the damage, otherwise the
           enemy's life points are set to 0, so the enemy cannot have negative life
           points.
        3* Like 2*, except the damage comes from the skill attacks; here the energy
           cost is subtracted from the A.I.'s energy.
        """
        damage = 0
        if self.energy < features["skill"][self.skill_book[0]]["energy_cost"]:  # 1*
            self.use_potion('energypotion')
        if self.potions['energypotion'] < 1 and \
                self.energy < features["skill"][self.skill_book[0]]["energy_cost"]:  # 2*
            message_output(f'{self.name} {self.weapon["message"]} {enemy.name}')
            damage = 10
            if enemy.lives >= damage:
                enemy.lives -= damage
            else:
                enemy.lives = 0
        if self.energy >= features["skill"][self.skill_book[0]]["energy_cost"]:  # 3*
            message_output(f'{self.name} {features["skill"][self.skill_book[0]]["message"]} {enemy.name}')
            damage = (features['skill'][self.skill_book[0]]['dmg'] * self.lives) // 100
            if enemy.lives >= damage:
                enemy.lives -= damage
            else:
                enemy.lives = 0
            self.energy -= features['skill'][self.skill_book[0]]['energy_cost']
        return damage

    def fight(self, enemy):
        pass

    # ######################### Override Methods ###############################
    def __str__(self) -> str:
        return (f'Name: {self.name} '
                f'lives: {self.lives} '
                f'energy: {self.energy} '
                f'weapon: {self.weapon} '
                f'char: {self.char_self}')

    # ######################### Getter Setter ##########################
    # @property
    # def name(self):
    #     return self.__name
    #
    # @name.setter
    # def name(self, name):
    #     self.__name = name
    #
    # @property
    # def lives(self):
    #     return self.__lives
    #
    # @lives.setter
    # def lives(self, lives):
    #     self.__lives = lives
    #
    # @property
    # def strength(self):
    #     return self.__strength
    #
    # @strength.setter
    # def strength(self, strength):
    #     self.__strength = strength
    #
    # @property
    # def char_auto(self):
    #     return self.__char_auto
    #
    # @char_auto.setter
    # def char_auto(self, char_auto):
    #     self.__char_auto = char_auto
    #
    # @property
    # def char_move(self):
    #     return self.__char_move
    #
    # @char_move.setter
    # def char_move(self, char_move):
    #     self.__char_move = char_move
    #
    # @property
    # def potions(self):
    #     return self.__potions
    #
    # @potions.setter
    # def potions(self, potions):
    #     self.__potions = potions
    #
    # @property
    # def brewed_potions(self):
    #     return self.__brewed_potions
    #
    # @brewed_potions.setter
    # def brewed_potions(self, brewed_potions):
    #     self.__brewed_potions = brewed_potions
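The eight explicit branches in `auto_move` above reduce to stepping one cell toward the rival on each axis independently. A compact standalone sketch of that chase rule (hypothetical names; it reproduces only the coordinate update, not the German direction labels):

def sign(n: int) -> int:
    return (n > 0) - (n < 0)


def step_toward(rival_pos, my_pos):
    """Move one cell toward rival_pos on each axis, like Character.auto_move."""
    y, x = my_pos
    return y + sign(rival_pos[0] - y), x + sign(rival_pos[1] - x)


assert step_toward((0, 0), (2, 2)) == (1, 1)   # diagonal step toward the rival
assert step_toward((2, 2), (2, 0)) == (2, 1)   # same row: step along x only
assert step_toward((1, 1), (1, 1)) == (1, 1)   # already on the rival's cell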
import os

os.system("python3 webserver.py &")

import asyncio
import uvloop
import sys
import discord
import ps2
import pyps4
from fortnitepy.ext import commands

asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

try:
    from typing import Any, Union, Optional
    import asyncio
    import datetime
    import json
    import functools
    import random as py_random
    import subprocess
    from fortnitepy.ext import commands
    import aioconsole
    import crayons
    import fortnitepy
    import FortniteAPIAsync
    import sanic
    import aiohttp
except ModuleNotFoundError as e:
    print(f'Error: {e}\nAttempting to install packages now (this may take a while).')
    for module in (
        'crayons',
        'fortnitepy',
        'BenBotAsync',
        'FortniteAPIAsync',
        'uvloop',
        'sanic',
        'aiohttp',
        'aioconsole'
    ):
        subprocess.check_call([sys.executable, "-m", "pip", "install", module])
    os.system('clear')
    print('Installed packages, restarting script.')
    python = sys.executable
    os.execl(python, python, *sys.argv)

print(crayons.cyan('Discord server: https://discord.gg/EWdPpeps94 - For support, questions, etc.'))

sanic_app = sanic.Sanic(__name__)
server = None
name = ""
filename = 'device_auths.json'


@sanic_app.route('/', methods=['GET'])
async def accept_ping(request: sanic.request.Request) -> None:
    return sanic.response.json({"status": "online"})


@sanic_app.route('/name', methods=['GET'])
async def get_display_name(request: sanic.request.Request) -> None:
    # Renamed from a second `accept_ping`: two handlers sharing one name shadow
    # each other and clash as duplicate route names in newer Sanic versions.
    return sanic.response.json({"display_name": name})


def get_device_auth_details():
    if os.path.isfile(filename):
        with open(filename, 'r') as fp:
            return json.load(fp)
    return {}


def store_device_auth_details(email, details):
    existing = get_device_auth_details()
    existing[email] = details
    with open(filename, 'w') as fp:
        json.dump(existing, fp)


async def get_authorization_code():
    while True:
        response = await aioconsole.ainput("Go to https://rebrand.ly/authcode and sign in as " + os.getenv("EMAIL") + " and enter the response: ")
        if "redirectUrl" in response:
            response = json.loads(response)
            if "?code" not in response["redirectUrl"]:
                print("Invalid response.")
                continue
            code = response["redirectUrl"].split("?code=")[1]
            return code
        else:
            if "https://accounts.epicgames.com/fnauth" in response:
                if "?code" not in response:
                    print("Invalid response.")
                    continue
                code = response.split("?code=")[1]
                return code
            else:
                code = response
                return code


class SilverBot(commands.Bot):
    def __init__(self, email: str, password: str, **kwargs) -> None:
        self.status = os.getenv("STATUS")
        self.kairos = 'cid_028_ff2b06cf446376144ba408d3482f5c982bf2584cf0f508ee3e4ba4a0fd461a38'

        device_auth_details = get_device_auth_details().get(email, {})
        super().__init__(
            command_prefix=os.getenv("PREFIX"),
            auth=fortnitepy.AdvancedAuth(
                email=email,
                password=password,
                prompt_authorization_code=False,
                delete_existing_device_auths=True,
                authorization_code=get_authorization_code,
                **device_auth_details
            ),
            status=self.status,
            platform=fortnitepy.Platform(os.getenv("PLATFORM")),
            avatar=fortnitepy.Avatar(
                asset=self.kairos,
                background_colors=fortnitepy.KairosBackgroundColorPreset.PINK.value
            ),
            **kwargs
        )

        self.fortnite_api = FortniteAPIAsync.APIClient()
        self.loop = asyncio.get_event_loop()

        self.default_skin = "CID_NPC_Athena_Commando_M_Fallback"
        self.default_backpack = "BID_138_Celestial"
        self.default_pickaxe = os.getenv("PICKAXE")
        self.banner = "INFLUENCERBANNER27"
        self.banner_colour = "BID_138_Celestial"
        self.default_level = "666"
        self.default_bp_tier = "-666666666"
        self.default_emote = "EID_kpopdance03"

        self.sanic_app = sanic_app
        self.server = server
self.whisper_message = "" self.welcome_message = "created bot by:g3_piton-52 bot creado por:g3_piton-52 create a lobby bot:https://discord.gg/EWdPpeps94 CREA TU PROPIO BOThttps://discord.gg/EWdPpeps94 twitch:skwox YT:its moderator Instagram:g3_ruben._.fuenla_ twiter:dekwik58 connect bot:16:60 a 21:00 frydays:9:00 a 23:00 my team fortnite Instagram:g3nesis_team my code creator nosoypayaso✦🍣 my number +994 40 477 91 82 check chat pls⃟" async def set_and_update_member_prop(self, schema_key: str, new_value: Any) -> None: prop = {schema_key: self.party.me.meta.set_prop(schema_key, new_value)} await self.party.me.patch(updated=prop) async def set_and_update_party_prop(self, schema_key: str, new_value: Any) -> None: prop = {schema_key: self.party.me.meta.set_prop(schema_key, new_value)} await self.party.patch(updated=prop) async def event_ready(self) -> None: global name name = self.user.display_name print(crayons.green(f'Client ready as {self.user.display_name}.')) coro = self.sanic_app.create_server( host='0.0.0.0', port=8000, return_asyncio_server=True, access_log=False ) self.server = await coro for pending in self.incoming_pending_friends: epic_friend = await pending.accept() if isinstance(epic_friend, fortnitepy.Friend): print(f"Accepted friend request from: {epic_friend.display_name}.") else: print(f"Accepted friend request from: {pending.display_name}.") async def event_party_invite(self, invite: fortnitepy.ReceivedPartyInvitation) -> None: await invite.accept() print(f'Accepted party invite from {invite.sender.display_name}.') async def event_friend_request(self, request: fortnitepy.IncomingPendingFriend) -> None: print(f"Received friend request from: {request.display_name}.") await request.accept() print(f"Accepted friend request from: {request.display_name}.") async def event_party_member_join(self, member: fortnitepy.PartyMember) -> None: await self.party.send(self.welcome_message.replace('{DISPLAY_NAME}', member.display_name)) if self.default_party_member_config.cls is not fortnitepy.party.JustChattingClientPartyMember: await self.party.me.edit( functools.partial( self.party.me.set_outfit, self.default_skin ), functools.partial( self.party.me.set_backpack, self.default_backpack ), functools.partial( self.party.me.set_pickaxe, self.default_pickaxe ), functools.partial( self.party.me.set_banner, icon=self.banner, color=self.banner_colour, season_level=self.default_level ), functools.partial( self.party.me.set_battlepass_info, has_purchased=True, level=self.default_bp_tier ) ) if self.default_party_member_config.cls is not fortnitepy.party.JustChattingClientPartyMember: await self.party.me.clear_emote() await self.party.me.set_emote(asset=self.default_emote) if self.user.display_name != member.display_name: # Welcomes the member who just joined. 
print(f"{member.display_name} has joined the lobby.") async def event_friend_message(self, message: fortnitepy.FriendMessage) -> None: print(f'{message.author.display_name}: {message.content}') await message.reply(self.welcome_message.replace('{DISPLAY_NAME}', message.author.display_name)) async def event_command_error(self, ctx: fortnitepy.ext.commands.Context, error: fortnitepy.ext.commands.CommandError) -> None: if isinstance(error, fortnitepy.ext.commands.errors.CommandNotFound): if isinstance(ctx.message, fortnitepy.FriendMessage): await ctx.send('Command not found, are you sure it exists?') else: pass elif isinstance(error, fortnitepy.ext.commands.errors.MissingRequiredArgument): await ctx.send('Failed to execute commands as there are missing requirements, please check usage.') elif isinstance(error, fortnitepy.ext.commands.errors.PrivateMessageOnly): pass else: raise error @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client using the outfits name.", help="Sets the outfit of the client using the outfits name.\n" "Example: !skin Nog Ops" ) async def skin(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaCharacter" ) await ctx.send(f'Skin set to {cosmetic.id}.') print(f"Set skin to: {cosmetic.id}.") await self.party.me.set_outfit(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a skin with the name: {content}.") print(f"Failed to find a skin with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the backpack of the client using the backpacks name.", help="Sets the backpack of the client using the backpacks name.\n" "Example: !backpack Black Shield" ) async def backpack(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaBackpack" ) await ctx.send(f'Backpack set to {cosmetic.id}.') print(f"Set backpack to: {cosmetic.id}.") await self.party.me.set_backpack(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a backpack with the name: {content}.") print(f"Failed to find a backpack with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the emote of the client using the emotes name.", help="Sets the emote of the client using the emotes name.\n" "Example: !emote Windmill Floss" ) async def emote(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaDance" ) await ctx.send(f'Emote set to {cosmetic.id}.') print(f"Set emote to: {cosmetic.id}.") await self.party.me.clear_emote() await self.party.me.set_emote(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find an emote with the name: {content}.") print(f"Failed to find an emote with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the pickaxe of the client using the pickaxe name.", help="Sets the pickaxe of the client using the pickaxe name.\n" "Example: !pickaxe Raider's Revenge" ) async def pickaxe(self, ctx: fortnitepy.ext.commands.Context, *, content: str) 
-> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaPickaxe" ) await ctx.send(f'Pickaxe set to {cosmetic.id}.') print(f"Set pickaxe to: {cosmetic.id}.") await self.party.me.set_pickaxe(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a pickaxe with the name: {content}.") print(f"Failed to find a pickaxe with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the pet (backpack) of the client using the pets name.", help="Sets the pet (backpack) of the client using the pets name.\n" "Example: !pet Bonesy" ) async def pet(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaPetCarrier" ) await ctx.send(f'Pet set to {cosmetic.id}.') print(f"Set pet to: {cosmetic.id}.") await self.party.me.set_pet(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a pet with the name: {content}.") print(f"Failed to find a pet with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the emoji of the client using the emojis name.", help="Sets the emoji of the client using the emojis name.\n" "Example: !emoji Snowball" ) async def emoji(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaEmoji" ) await ctx.send(f'Emoji set to {cosmetic.id}.') print(f"Set emoji to: {cosmetic.id}.") await self.party.me.set_emoji(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find an emoji with the name: {content}.") print(f"Failed to find an emoji with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the contrail of the client using the contrail name.", help="Sets the contrail of the client using the contrail name.\n" "Example: !contrail Holly And Divey" ) async def contrail(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaSkyDiveContrail" ) await ctx.send(f'Contrail set to {cosmetic.id}.') print(f"Set contrail to: {cosmetic.id}.") await self.party.me.set_contrail(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a contrail with the name: {content}.") print(f"Failed to find an contrail with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Purple Skull Trooper.", help="Sets the outfit of the client to Purple Skull Trooper.\n" "Example: !purpleskull" ) async def purpleskull(self, ctx: fortnitepy.ext.commands.Context) -> None: skin_variants = self.party.me.create_variants( clothing_color=1 ) await self.party.me.set_outfit( asset='CID_030_Athena_Commando_M_Halloween', variants=skin_variants ) await ctx.send('Skin set to Purple Skull Trooper!') print(f"Skin set to Purple Skull Trooper.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Pink Ghoul Trooper.", help="Sets the outfit of the client to Pink Ghoul Trooper.\n" "Example: 
!pinkghoul" ) async def pinkghoul(self, ctx: fortnitepy.ext.commands.Context) -> None: skin_variants = self.party.me.create_variants( material=3 ) await self.party.me.set_outfit( asset='CID_029_Athena_Commando_F_Halloween', variants=skin_variants ) await ctx.send('Skin set to Pink Ghoul Trooper!') print(f"Skin set to Pink Ghoul Trooper.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the backpack of the client to Purple Ghost Portal.", help="Sets the backpack of the client to Purple Ghost Portal.\n" "Example: !purpleportal" ) async def purpleportal(self, ctx: fortnitepy.ext.commands.Context) -> None: skin_variants = self.party.me.create_variants( item='AthenaBackpack', particle_config='Particle', particle=1 ) await self.party.me.set_backpack( asset='BID_105_GhostPortal', variants=skin_variants ) await ctx.send('Backpack set to Purple Ghost Portal!') print(f"Backpack set to Purple Ghost Portal.") @commands.dm_only() @commands.command( description="[Party] Sets the banner of the self.", help="Sets the banner of the self.\n" "Example: !banner BRSeason01 defaultcolor15 100" ) async def banner(self, ctx: fortnitepy.ext.commands.Context, icon: Optional[str] = None, colour: Optional[str] = None, banner_level: Optional[int] = None ) -> None: await self.party.me.set_banner(icon=icon, color=colour, season_level=banner_level) await ctx.send(f'Banner set to: {icon} with {colour} at level {banner_level}.') print(f"Banner set to: {icon} with {colour} at level {banner_level}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client using CID.", help="Sets the outfit of the client using CID.\n" "Example: !cid CID_047_Athena_Commando_F_HolidayReindeer" ) async def cid(self, ctx: fortnitepy.ext.commands.Context, character_id: str) -> None: await self.party.me.set_outfit( asset=character_id, variants=self.party.me.create_variants(profile_banner='ProfileBanner') ) await ctx.send(f'Skin set to {character_id}.') print(f'Skin set to {character_id}.') @commands.dm_only() @commands.command( description="[Cosmetic] Creates the variants list by the variants you set using VTID.", help="Creates the variants list by the variants you set using VTID.\n" "Example: !vtid VTID_052_Skull_Trooper_RedFlames" ) async def vtid(self, ctx: fortnitepy.ext.commands.Context, variant_token: str) -> None: variant_id = await self.set_vtid(variant_token) if variant_id[1].lower() == 'particle': skin_variants = self.party.me.create_variants(particle_config='Particle', particle=1) else: skin_variants = self.party.me.create_variants(**{variant_id[1].lower(): int(variant_id[2])}) await self.party.me.set_outfit(asset=variant_id[0], variants=skin_variants) print(f'Set variants of {variant_id[0]} to {variant_id[1]} {variant_id[2]}.') await ctx.send(f'Variants set to {variant_token}.\n' '(Warning: This feature is not supported, please use !variants)') @commands.dm_only() @commands.command( description="[Cosmetic] Creates the variants list by the variants you set.", help="Creates the variants list by the variants you set.\n" "Example: !variants CID_030_Athena_Commando_M_Halloween clothing_color 1" ) async def variants(self, ctx: fortnitepy.ext.commands.Context, cosmetic_id: str, variant_type: str, variant_int: str) -> None: if 'cid' in cosmetic_id.lower() and 'jersey_color' not in variant_type.lower(): skin_variants = self.party.me.create_variants( **{variant_type: int(variant_int) if variant_int.isdigit() else variant_int} ) await self.party.me.set_outfit( asset=cosmetic_id, 
    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Creates the variants list by the variants you set.",
        help="Creates the variants list by the variants you set.\n"
             "Example: !variants CID_030_Athena_Commando_M_Halloween clothing_color 1"
    )
    async def variants(self, ctx: fortnitepy.ext.commands.Context, cosmetic_id: str,
                       variant_type: str, variant_int: str) -> None:
        if 'cid' in cosmetic_id.lower() and 'jersey_color' not in variant_type.lower():
            skin_variants = self.party.me.create_variants(
                **{variant_type: int(variant_int) if variant_int.isdigit() else variant_int}
            )
            await self.party.me.set_outfit(asset=cosmetic_id, variants=skin_variants)
        elif 'cid' in cosmetic_id.lower() and 'jersey_color' in variant_type.lower():
            cosmetic_variants = self.party.me.create_variants(
                pattern=0, numeric=69,
                **{variant_type: int(variant_int) if variant_int.isdigit() else variant_int}
            )
            await self.party.me.set_outfit(asset=cosmetic_id, variants=cosmetic_variants)
        elif 'bid' in cosmetic_id.lower():
            cosmetic_variants = self.party.me.create_variants(
                item='AthenaBackpack',
                **{variant_type: int(variant_int) if variant_int.isdigit() else variant_int}
            )
            await self.party.me.set_backpack(asset=cosmetic_id, variants=cosmetic_variants)
        elif 'pickaxe_id' in cosmetic_id.lower():
            cosmetic_variants = self.party.me.create_variants(
                item='AthenaPickaxe',
                **{variant_type: int(variant_int) if variant_int.isdigit() else variant_int}
            )
            await self.party.me.set_pickaxe(asset=cosmetic_id, variants=cosmetic_variants)
        await ctx.send(f'Set variants of {cosmetic_id} to {variant_type} {variant_int}.')
        print(f'Set variants of {cosmetic_id} to {variant_type} {variant_int}.')
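    # NOTE (editor sketch): create_variants() builds the raw variant payload
    # from keyword arguments such as material=, clothing_color=, parts= or
    # progressive=, which is the mechanism every shortcut command below relies
    # on. Illustrative call (values taken from !purpleskull above):
    #
    #     variants = self.party.me.create_variants(clothing_color=1)
    #     await self.party.me.set_outfit(
    #         asset='CID_030_Athena_Commando_M_Halloween',
    #         variants=variants
    #     )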
    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the outfit of the client to Checkered Renegade.",
        help="Sets the outfit of the client to Checkered Renegade.\n"
             "Example: !checkeredrenegade"
    )
    async def checkeredrenegade(self, ctx: fortnitepy.ext.commands.Context) -> None:
        skin_variants = self.party.me.create_variants(material=2)
        await self.party.me.set_outfit(
            asset='CID_028_Athena_Commando_F',
            variants=skin_variants
        )
        await ctx.send('Skin set to Checkered Renegade!')
        print('Skin set to Checkered Renegade.')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the outfit of the client to Minty Elf.",
        help="Sets the outfit of the client to Minty Elf.\n"
             "Example: !mintyelf"
    )
    async def mintyelf(self, ctx: fortnitepy.ext.commands.Context) -> None:
        skin_variants = self.party.me.create_variants(material=2)
        await self.party.me.set_outfit(
            asset='CID_051_Athena_Commando_M_HolidayElf',
            variants=skin_variants
        )
        await ctx.send('Skin set to Minty Elf!')
        print('Skin set to Minty Elf.')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the emote of the client using EID.",
        help="Sets the emote of the client using EID.\n"
             "Example: !eid EID_Floss"
    )
    async def eid(self, ctx: fortnitepy.ext.commands.Context, emote_id: str) -> None:
        await self.party.me.clear_emote()
        await self.party.me.set_emote(asset=emote_id)
        await ctx.send(f'Emote set to {emote_id}!')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Clears/stops the emote currently playing.",
        help="Clears/stops the emote currently playing.\n"
             "Example: !stop"
    )
    async def stop(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.clear_emote()
        await ctx.send('Stopped emoting.')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the backpack of the client using BID.",
        help="Sets the backpack of the client using BID.\n"
             "Example: !bid BID_023_Pinkbear"
    )
    async def bid(self, ctx: fortnitepy.ext.commands.Context, backpack_id: str) -> None:
        await self.party.me.set_backpack(asset=backpack_id)
        await ctx.send(f'Backbling set to {backpack_id}!')

    @commands.dm_only()
    @commands.command(
        aliases=['legacypickaxe'],
        description="[Cosmetic] Sets the pickaxe of the client using PICKAXE_ID.",
        help="Sets the pickaxe of the client using PICKAXE_ID.\n"
             "Example: !pickaxe_id Pickaxe_ID_073_Balloon"
    )
    async def pickaxe_id(self, ctx: fortnitepy.ext.commands.Context, pickaxe_id_: str) -> None:
        await self.party.me.set_pickaxe(asset=pickaxe_id_)
        await ctx.send(f'Pickaxe set to {pickaxe_id_}')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the pet of the client using PetCarrier_.",
        help="Sets the pet of the client using PetCarrier_.\n"
             "Example: !pet_carrier PetCarrier_002_Chameleon"
    )
    async def pet_carrier(self, ctx: fortnitepy.ext.commands.Context, pet_carrier_id: str) -> None:
        await self.party.me.set_pet(asset=pet_carrier_id)
        await ctx.send(f'Pet set to {pet_carrier_id}!')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the emoji of the client using Emoji_.",
        help="Sets the emoji of the client using Emoji_.\n"
             "Example: !emoji_id Emoji_PeaceSign"
    )
    async def emoji_id(self, ctx: fortnitepy.ext.commands.Context, emoji_: str) -> None:
        await self.party.me.clear_emote()
        await self.party.me.set_emoji(asset=emoji_)
        await ctx.send(f'Emoji set to {emoji_}!')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the contrail of the client using Trails_.",
        help="Sets the contrail of the client using Trails_.\n"
             "Example: !trails Trails_ID_075_Celestial"
    )
    async def trails(self, ctx: fortnitepy.ext.commands.Context, trails_: str) -> None:
        await self.party.me.set_contrail(asset=trails_)
        await ctx.send(f'Contrail set to {trails_}!')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets pickaxe using PICKAXE_ID or display name & does 'Point it Out'. "
                    "If no pickaxe is specified, only the emote will be played.",
        help="Sets pickaxe using PICKAXE_ID or display name & does 'Point it Out'. If no pickaxe is "
             "specified, only the emote will be played.\n"
             "Example: !point Pickaxe_ID_029_Assassin"
    )
    async def point(self, ctx: fortnitepy.ext.commands.Context, *, content: Optional[str] = None) -> None:
        if content is None:
            await self.party.me.set_emote(asset='EID_IceKing')
            await ctx.send('Point it Out played.')
        elif 'pickaxe_id' in content.lower():
            await self.party.me.set_pickaxe(asset=content)
            await self.party.me.set_emote(asset='EID_IceKing')
            await ctx.send(f'Pickaxe set to {content} & Point it Out played.')
        else:
            try:
                cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(
                    lang="en", searchLang="en", matchMethod="contains",
                    name=content, backendType="AthenaPickaxe"
                )
                await self.party.me.set_pickaxe(asset=cosmetic.id)
                await self.party.me.clear_emote()
                await self.party.me.set_emote(asset='EID_IceKing')
                await ctx.send(f'Pickaxe set to {content} & Point it Out played.')
            except FortniteAPIAsync.exceptions.NotFound:
                await ctx.send(f"Failed to find a pickaxe with the name: {content}")

    @commands.dm_only()
    @commands.command(
        description="[Party] Sets the readiness of the client to ready.",
        help="Sets the readiness of the client to ready.\n"
             "Example: !ready"
    )
    async def ready(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.set_ready(fortnitepy.ReadyState.READY)
        await ctx.send('Ready!')

    @commands.dm_only()
    @commands.command(
        aliases=['sitin'],
        description="[Party] Sets the readiness of the client to unready.",
        help="Sets the readiness of the client to unready.\n"
             "Example: !unready"
    )
    async def unready(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.set_ready(fortnitepy.ReadyState.NOT_READY)
        await ctx.send('Unready!')

    @commands.dm_only()
    @commands.command(
        description="[Party] Sets the readiness of the client to SittingOut.",
        help="Sets the readiness of the client to SittingOut.\n"
             "Example: !sitout"
    )
    async def sitout(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.set_ready(fortnitepy.ReadyState.SITTING_OUT)
        await ctx.send('Sitting Out!')
    @commands.dm_only()
    @commands.command(
        description="[Party] Sets the battlepass info of the client.",
        help="Sets the battlepass info of the client.\n"
             "Example: !bp 100"
    )
    async def bp(self, ctx: fortnitepy.ext.commands.Context, tier: int) -> None:
        await self.party.me.set_battlepass_info(has_purchased=True, level=tier)
        await ctx.send(f'Set battle pass tier to {tier}.')

    @commands.dm_only()
    @commands.command(
        description="[Party] Sets the level of the client.",
        help="Sets the level of the client.\n"
             "Example: !level 999"
    )
    async def level(self, ctx: fortnitepy.ext.commands.Context, banner_level: int) -> None:
        await self.party.me.set_banner(season_level=banner_level)
        await ctx.send(f'Set level to {banner_level}.')

    @commands.dm_only()
    @commands.command(
        description="[Party] Sends message to party chat with the given content.",
        help="Sends message to party chat with the given content.\n"
             "Example: !echo i cant fix the fucking public lobby bots"
    )
    async def echo(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
        await self.party.send(content)
        await ctx.send('Sent message to party chat.')

    @commands.dm_only()
    @commands.command(
        description="[Client] Sends and sets the status.",
        help="Sends and sets the status.\n"
             "Example: !status Presence Unknown"
    )
    async def status(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
        await self.set_presence(content)
        await ctx.send(f'Status set to {content}')
        print(f'Status set to {content}.')

    @commands.dm_only()
    @commands.command(
        description="[Party] Leaves the current party.",
        help="Leaves the current party.\n"
             "Example: !leave"
    )
    async def leave(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.set_emote('EID_Wave')
        await self.party.me.leave()
        await ctx.send('Bye!')
        print('Left the party as I was requested.')

    @commands.dm_only()
    @commands.command(
        description="[Party] Kicks the inputted user.",
        help="Kicks the inputted user.\n"
             "Example: !kick Cxnyaa"
    )
    async def kick(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: str) -> None:
        user = await self.fetch_user(epic_username)
        member = self.party.members.get(user.id)
        if member is None:
            await ctx.send("Failed to find that user, are you sure they're in the party?")
        else:
            try:
                await member.kick()
                await ctx.send(f"Kicked user: {member.display_name}.")
                print(f"Kicked user: {member.display_name}")
            except fortnitepy.errors.Forbidden:
                await ctx.send(f"Failed to kick {member.display_name}, as I'm not party leader.")
                print(crayons.red("[ERROR] Failed to kick member as I don't have the required permissions."))
    @commands.dm_only()
    @commands.command(
        aliases=['unhide'],
        description="[Party] Promotes the defined user to party leader. If friend is left blank, "
                    "the message author will be used.",
        help="Promotes the defined user to party leader. If friend is left blank, "
             "the message author will be used.\n"
             "Example: !promote mxnty"
    )
    async def promote(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
        if epic_username is None:
            user = await self.fetch_user(ctx.author.display_name)
            member = self.party.members.get(user.id)
        else:
            user = await self.fetch_user(epic_username)
            member = self.party.members.get(user.id)
        if member is None:
            await ctx.send("Failed to find that user, are you sure they're in the party?")
        else:
            try:
                await member.promote()
                await ctx.send(f"Promoted user: {member.display_name}.")
                print(f"Promoted user: {member.display_name}")
            except fortnitepy.errors.Forbidden:
                await ctx.send(f"Failed to promote {member.display_name}, as I'm not party leader.")
                print(crayons.red("[ERROR] Failed to promote member as I don't have the required permissions."))

    @commands.dm_only()
    @commands.command(
        description="[Party] Sets the lobby's selected playlist.",
        help="Sets the lobby's selected playlist.\n"
             "Example: !playlist_id Playlist_Tank_Solo"
    )
    async def playlist_id(self, ctx: fortnitepy.ext.commands.Context, playlist_: str) -> None:
        try:
            await self.party.set_playlist(playlist=playlist_)
            await ctx.send(f'Gamemode set to {playlist_}')
        except fortnitepy.errors.Forbidden:
            await ctx.send(f"Failed to set gamemode to {playlist_}, as I'm not party leader.")
            print(crayons.red("[ERROR] Failed to set gamemode as I don't have the required permissions."))

    @commands.dm_only()
    @commands.command(
        description="[Party] Sets the party's current privacy.",
        help="Sets the party's current privacy.\n"
             "Example: !privacy private"
    )
    async def privacy(self, ctx: fortnitepy.ext.commands.Context, privacy_type: str) -> None:
        try:
            if privacy_type.lower() == 'public':
                await self.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC)
            elif privacy_type.lower() == 'private':
                await self.party.set_privacy(fortnitepy.PartyPrivacy.PRIVATE)
            elif privacy_type.lower() == 'friends':
                await self.party.set_privacy(fortnitepy.PartyPrivacy.FRIENDS)
            elif privacy_type.lower() == 'friends_allow_friends_of_friends':
                await self.party.set_privacy(fortnitepy.PartyPrivacy.FRIENDS_ALLOW_FRIENDS_OF_FRIENDS)
            elif privacy_type.lower() == 'private_allow_friends_of_friends':
                await self.party.set_privacy(fortnitepy.PartyPrivacy.PRIVATE_ALLOW_FRIENDS_OF_FRIENDS)
            await ctx.send(f'Party privacy set to {self.party.privacy}.')
            print(f'Party privacy set to {self.party.privacy}.')
        except fortnitepy.errors.Forbidden:
            await ctx.send(f"Failed to set party privacy to {privacy_type}, as I'm not party leader.")
            print(crayons.red("[ERROR] Failed to set party privacy as I don't have the required permissions."))
"\nExample: !copy Terbau" ) async def copy(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None: if epic_username is None: member = self.party.members.get(ctx.author.id) else: user = await self.fetch_user(epic_username) member = self.party.members.get(user.id) await self.party.me.edit( functools.partial( fortnitepy.ClientPartyMember.set_outfit, asset=member.outfit, variants=member.outfit_variants ), functools.partial( fortnitepy.ClientPartyMember.set_backpack, asset=member.backpack, variants=member.backpack_variants ), functools.partial( fortnitepy.ClientPartyMember.set_pickaxe, asset=member.pickaxe, variants=member.pickaxe_variants ), functools.partial( fortnitepy.ClientPartyMember.set_banner, icon=member.banner[0], color=member.banner[1], season_level=member.banner[2] ), functools.partial( fortnitepy.ClientPartyMember.set_battlepass_info, has_purchased=True, level=member.battlepass_info[1] ) ) await self.party.me.set_emote(asset=member.emote) await ctx.send(f'Copied the loadout of {member.display_name}.') @commands.dm_only() @commands.command( description="[Cosmetic] Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.", help="Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.\n" "Example: !hologram" ) async def hologram(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_outfit( asset='CID_VIP_Athena_Commando_M_GalileoGondola_SG' ) await ctx.send('Skin set to Star Wars Hologram!') print(f'Skin set to Star Wars Hologram.') @commands.dm_only() @commands.command( description="[Cosmetic] Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.", help="Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.\n" "Example: !gift is a joke command." 
    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.",
        help="Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.\n"
             "Example: !hologram"
    )
    async def hologram(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.set_outfit(asset='CID_VIP_Athena_Commando_M_GalileoGondola_SG')
        await ctx.send('Skin set to Star Wars Hologram!')
        print('Skin set to Star Wars Hologram.')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Joke command that plays the emote EID_NeverGonna.",
        help="Joke command that plays the emote EID_NeverGonna.\n"
             "Example: !gift"
    )
    async def gift(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.clear_emote()
        await self.party.me.set_emote(asset='EID_NeverGonna')
        await ctx.send('What did you think would happen?')

    @commands.dm_only()
    @commands.command(
        description="[Party] Sets the party's custom matchmaking code.",
        help="Sets the party's custom matchmaking code.\n"
             "Example: !matchmakingcode 1234-5678-9012"
    )
    async def matchmakingcode(self, ctx: fortnitepy.ext.commands.Context, *, custom_matchmaking_key: str) -> None:
        await self.party.set_custom_key(key=custom_matchmaking_key)
        await ctx.send(f'Custom matchmaking code set to: {custom_matchmaking_key}')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Shortcut for equipping the emote EID_TourBus.",
        help="Shortcut for equipping the emote EID_TourBus.\n"
             "Example: !ponpon"
    )
    async def ponpon(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.set_emote(asset='EID_TourBus')
        await ctx.send('Emote set to Ninja Style!')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the enlightened value of a skin "
                    "(used for skins such as glitched Scratch or Golden Peely).",
        help="Sets the enlightened value of a skin.\n"
             "Example: !enlightened CID_701_Athena_Commando_M_BananaAgent 2 350"
    )
    async def enlightened(self, ctx: fortnitepy.ext.commands.Context, cosmetic_id: str,
                          br_season: int, skin_level: int) -> None:
        variant_types = {
            1: self.party.me.create_variants(progressive=4),
            2: self.party.me.create_variants(progressive=4),
            3: self.party.me.create_variants(material=2)
        }
        if 'cid' in cosmetic_id.lower():
            await self.party.me.set_outfit(
                asset=cosmetic_id,
                variants=variant_types[br_season] if br_season in variant_types else variant_types[2],
                enlightenment=(br_season, skin_level)
            )
            await ctx.send(f'Skin set to {cosmetic_id} at level {skin_level} (for Season 1{br_season}).')
        elif 'bid' in cosmetic_id.lower():
            await self.party.me.set_backpack(
                asset=cosmetic_id,
                variants=self.party.me.create_variants(progressive=2),
                enlightenment=(br_season, skin_level)
            )
            await ctx.send(f'Backpack set to {cosmetic_id} at level {skin_level} (for Season 1{br_season}).')
        print(f'Enlightenment for {cosmetic_id} set to level {skin_level} (for Season 1{br_season}).')
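    # NOTE (editor sketch): enlightenment takes a (season, level) tuple, and
    # the command above keys it by the second digit of the season, which is
    # why the messages print "Season 1{br_season}". Illustrative call, matching
    # the !goldenpeely shortcut further down:
    #
    #     await self.party.me.set_outfit(
    #         asset='CID_701_Athena_Commando_M_BananaAgent',
    #         variants=self.party.me.create_variants(progressive=4),
    #         enlightenment=(2, 350)  # Season 12, level 350
    #     )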
    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Shortcut for equipping the skin CID_605_Athena_Commando_M_TourBus.",
        help="Shortcut for equipping the skin CID_605_Athena_Commando_M_TourBus.\n"
             "Example: !ninja"
    )
    async def ninja(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.set_outfit(asset='CID_605_Athena_Commando_M_TourBus')
        await ctx.send('Skin set to Ninja!')
        print('Skin set to Ninja.')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Equips all very rare skins.",
        help="Equips all very rare skins.\n"
             "Example: !rareskins"
    )
    async def rareskins(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await ctx.send('Showing all rare skins now.')
        await self.party.me.set_outfit(
            asset='CID_030_Athena_Commando_M_Halloween',
            variants=self.party.me.create_variants(clothing_color=1)
        )
        await ctx.send('Skin set to Purple Skull Trooper!')
        print("Skin set to Purple Skull Trooper.")
        await self.party.me.set_outfit(
            asset='CID_029_Athena_Commando_F_Halloween',
            variants=self.party.me.create_variants(material=3)
        )
        await ctx.send('Skin set to Pink Ghoul Trooper!')
        print("Skin set to Pink Ghoul Trooper.")
        for rare_skin in ('CID_028_Athena_Commando_F', 'CID_017_Athena_Commando_M'):
            await self.party.me.set_outfit(asset=rare_skin)
            await ctx.send(f'Skin set to {rare_skin}!')
            print(f"Skin set to: {rare_skin}!")

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the outfit of the client to Golden Peely "
                    "(shortcut for !enlightened CID_701_Athena_Commando_M_BananaAgent 2 350).",
        help="Sets the outfit of the client to Golden Peely.\n"
             "Example: !goldenpeely"
    )
    async def goldenpeely(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.set_outfit(
            asset='CID_701_Athena_Commando_M_BananaAgent',
            variants=self.party.me.create_variants(progressive=4),
            enlightenment=(2, 350)
        )
        await ctx.send('Skin set to Golden Peely.')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Randomly finds & equips a skin. Types currently include skin, backpack, emote & all. "
                    "If type is left blank, a random skin will be equipped.",
        help="Randomly finds & equips a skin.\n"
             "Example: !random skin"
    )
    async def random(self, ctx: fortnitepy.ext.commands.Context, cosmetic_type: str = 'skin') -> None:
        if cosmetic_type == 'skin':
            all_outfits = await self.fortnite_api.cosmetics.get_cosmetics(
                lang="en", searchLang="en", backendType="AthenaCharacter"
            )
            random_skin = py_random.choice(all_outfits)
            await self.party.me.set_outfit(
                asset=random_skin.id,
                variants=self.party.me.create_variants(profile_banner='ProfileBanner')
            )
            await ctx.send(f'Skin randomly set to {random_skin.name}.')
            print(f'Skin randomly set to {random_skin.name}.')
        elif cosmetic_type == 'backpack':
            all_backpacks = await self.fortnite_api.cosmetics.get_cosmetics(
                lang="en", searchLang="en", backendType="AthenaBackpack"
            )
            random_backpack = py_random.choice(all_backpacks)
            await self.party.me.set_backpack(
                asset=random_backpack.id,
                variants=self.party.me.create_variants(profile_banner='ProfileBanner')
            )
            await ctx.send(f'Backpack randomly set to {random_backpack.name}.')
            print(f'Backpack randomly set to {random_backpack.name}.')
        elif cosmetic_type == 'emote':
            all_emotes = await self.fortnite_api.cosmetics.get_cosmetics(
                lang="en", searchLang="en", backendType="AthenaDance"
            )
            random_emote = py_random.choice(all_emotes)
            await self.party.me.set_emote(asset=random_emote.id)
            await ctx.send(f'Emote randomly set to {random_emote.name}.')
            print(f'Emote randomly set to {random_emote.name}.')
        elif cosmetic_type == 'all':
            all_outfits = await self.fortnite_api.cosmetics.get_cosmetics(
                lang="en", searchLang="en", backendType="AthenaCharacter"
            )
            all_backpacks = await self.fortnite_api.cosmetics.get_cosmetics(
                lang="en", searchLang="en", backendType="AthenaBackpack"
            )
            all_emotes = await self.fortnite_api.cosmetics.get_cosmetics(
                lang="en", searchLang="en", backendType="AthenaDance"
            )
            random_outfit = py_random.choice(all_outfits).id
            random_backpack = py_random.choice(all_backpacks).id
            random_emote = py_random.choice(all_emotes).id
            await self.party.me.set_outfit(asset=random_outfit)
            await ctx.send(f'Skin randomly set to {random_outfit}.')
            await self.party.me.set_backpack(asset=random_backpack)
            await ctx.send(f'Backpack randomly set to {random_backpack}.')
            await self.party.me.set_emote(asset=random_emote)
            await ctx.send(f'Emote randomly set to {random_emote}.')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Clears the currently set backpack.",
        help="Clears the currently set backpack.\n"
             "Example: !nobackpack"
    )
    async def nobackpack(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.clear_backpack()
        await ctx.send('Removed backpack.')
help="Clears the currently set pet.\n" "Example: !nopet" ) async def nopet(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.clear_pet() await ctx.send('Removed pet.') @commands.dm_only() @commands.command( description="[Cosmetic] Clears the currently set contrail.", help="Clears the currently set contrail.\n" "Example: !nocontrail" ) async def nocontrail(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.clear_contrail() await ctx.send('Removed contrail.') @commands.dm_only() @commands.command( description="[Party] Sets the client to the \"In Match\" state. If the first argument is 'progressive', " "the players remaining will gradually drop to mimic a real game.", help="Sets the client to the \"In Match\" state.\n" "Example: !match 69 420" ) async def match(self, ctx: fortnitepy.ext.commands.Context, players: Union[str, int] = 0, match_time: int = 0) -> None: if players == 'progressive': match_time = datetime.datetime.utcnow() await self.party.me.set_in_match( players_left=100, started_at=match_time ) while (100 >= self.party.me.match_players_left > 0 and self.party.me.in_match()): await self.party.me.set_in_match( players_left=self.party.me.match_players_left - py_random.randint(3, 6), started_at=match_time ) else: await self.party.me.set_in_match( players_left=int(players), started_at=datetime.datetime.utcnow() - datetime.timedelta(minutes=match_time) ) await ctx.send(f'Set state to in-game in a match with {players} players.' '\nUse the command: !lobby to revert back to normal.') @commands.dm_only() @commands.command( description="[Party] Sets the client to normal pre-game lobby state.", help="Sets the client to normal pre-game lobby state.\n" "Example: !lobby" ) async def lobby(self, ctx: fortnitepy.ext.commands.Context) -> None: if self.default_party_member_config.cls == fortnitepy.JustChattingClientPartyMember: self.default_party_member_config.cls = fortnitepy.ClientPartyMember party_id = self.party.id await self.party.me.leave() await ctx.send('Removed state of Just Chattin\'. Now attempting to rejoin party.') try: await self.join_party(party_id) except fortnitepy.errors.Forbidden: await ctx.send('Failed to join back as party is set to private.') except fortnitepy.errors.NotFound: await ctx.send('Party not found, are you sure Fortnite is open?') await self.party.me.clear_in_match() await ctx.send('Set state to the pre-game lobby.') @commands.dm_only() @commands.command( description="[Party] Joins the party of the defined friend. 
    @commands.dm_only()
    @commands.command(
        description="[Party] Sets the client to normal pre-game lobby state.",
        help="Sets the client to normal pre-game lobby state.\n"
             "Example: !lobby"
    )
    async def lobby(self, ctx: fortnitepy.ext.commands.Context) -> None:
        if self.default_party_member_config.cls == fortnitepy.JustChattingClientPartyMember:
            self.default_party_member_config.cls = fortnitepy.ClientPartyMember
            party_id = self.party.id
            await self.party.me.leave()
            await ctx.send('Removed state of Just Chattin\'. Now attempting to rejoin party.')
            try:
                await self.join_party(party_id)
            except fortnitepy.errors.Forbidden:
                await ctx.send('Failed to join back as party is set to private.')
            except fortnitepy.errors.NotFound:
                await ctx.send('Party not found, are you sure Fortnite is open?')
        await self.party.me.clear_in_match()
        await ctx.send('Set state to the pre-game lobby.')

    @commands.dm_only()
    @commands.command(
        description="[Party] Joins the party of the defined friend. If friend is left blank, "
                    "the message author will be used.",
        help="Joins the party of the defined friend.\n"
             "Example: !join Terbau"
    )
    async def join(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
        if epic_username is None:
            epic_friend = self.get_friend(ctx.author.id)
        else:
            user = await self.fetch_user(epic_username)
            if user is not None:
                epic_friend = self.get_friend(user.id)
            else:
                epic_friend = None
                await ctx.send(f'Failed to find user with the name: {epic_username}.')
        if isinstance(epic_friend, fortnitepy.Friend):
            try:
                await epic_friend.join_party()
                await ctx.send(f'Joined the party of {epic_friend.display_name}.')
            except fortnitepy.errors.Forbidden:
                await ctx.send('Failed to join party since it is private.')
            except fortnitepy.errors.PartyError:
                await ctx.send('Party not found, are you sure Fortnite is open?')
        else:
            await ctx.send('Cannot join party as the friend is not found.')

    @commands.dm_only()
    @commands.command(
        description="[Party] Sends the defined user a friend request.",
        help="Sends the defined user a friend request.\n"
             "Example: !friend Ninja"
    )
    async def friend(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: str) -> None:
        user = await self.fetch_user(epic_username)
        if user is not None:
            await self.add_friend(user.id)
            await ctx.send(f'Sent/accepted friend request to/from {user.display_name}.')
            print(f'Sent/accepted friend request to/from {user.display_name}.')
        else:
            await ctx.send(f'Failed to find user with the name: {epic_username}.')
            print(crayons.red(f"[ERROR] Failed to find a user with the name {epic_username}."))

    @commands.dm_only()
    @commands.command(
        description="[Party] Sets the lobby's selected playlist using playlist name.",
        help="Sets the lobby's selected playlist using playlist name.\n"
             "Example: !playlist Food Fight"
    )
    async def playlist(self, ctx: fortnitepy.ext.commands.Context, *, playlist_name: str) -> None:
        try:
            scuffedapi_playlist_id = await self.fortnite_api.get_playlist(playlist_name)
            if scuffedapi_playlist_id is not None:
                await self.party.set_playlist(playlist=scuffedapi_playlist_id)
                await ctx.send(f'Playlist set to {scuffedapi_playlist_id}.')
                print(f'Playlist set to {scuffedapi_playlist_id}.')
            else:
                await ctx.send(f'Failed to find a playlist with the name: {playlist_name}.')
                print(crayons.red(f"[ERROR] Failed to find a playlist with the name: {playlist_name}."))
        except fortnitepy.errors.Forbidden:
            await ctx.send(f"Failed to set playlist to {playlist_name}, as I'm not party leader.")
            print(crayons.red("[ERROR] Failed to set playlist as I don't have the required permissions."))
    @commands.dm_only()
    @commands.command(
        name="invite",
        description="[Party] Invites the defined friend to the party. If friend is left blank, "
                    "the message author will be used.",
        help="Invites the defined friend to the party.\n"
             "Example: !invite Terbau"
    )
    async def _invite(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
        if epic_username is None:
            epic_friend = self.get_friend(ctx.author.id)
        else:
            user = await self.fetch_user(epic_username)
            if user is not None:
                epic_friend = self.get_friend(user.id)
            else:
                epic_friend = None
                await ctx.send(f'Failed to find user with the name: {epic_username}.')
                print(crayons.red(f"[ERROR] Failed to find user with the name: {epic_username}."))
        if isinstance(epic_friend, fortnitepy.Friend):
            try:
                await epic_friend.invite()
                await ctx.send(f'Invited {epic_friend.display_name} to the party.')
                print(f"Invited {epic_friend.display_name} to the party.")
            except fortnitepy.errors.PartyError:
                await ctx.send('Failed to invite friend as they are either already in the party or it is full.')
                print(crayons.red("[ERROR] Failed to invite to party as friend is already either in party or it is full."))
        else:
            await ctx.send('Cannot invite to party as the friend is not found.')
            print(crayons.red("[ERROR] Failed to invite to party as the friend is not found."))

    @commands.dm_only()
    @commands.command(
        description="[Party] Hides everyone in the party except for the bot but if a player is specified, "
                    "that specific player will be hidden.",
        help="Hides members of the party.\n"
             "Example: !hide"
    )
    async def hide(self, ctx: fortnitepy.ext.commands.Context, party_member: Optional[str] = None) -> None:
        if self.party.me.leader:
            if party_member is not None:
                user = await self.fetch_user(party_member)
                member = self.party.members.get(user.id)
                if member is not None:
                    raw_squad_assignments = self.party.meta.get_prop(
                        'Default:RawSquadAssignments_j'
                    )["RawSquadAssignments"]
                    for player in raw_squad_assignments:
                        if player['memberId'] == member.id:
                            raw_squad_assignments.remove(player)
                    await self.set_and_update_party_prop(
                        'Default:RawSquadAssignments_j',
                        {'RawSquadAssignments': raw_squad_assignments}
                    )
                else:
                    await ctx.send(f'Failed to find user with the name: {party_member}.')
                    print(crayons.red(f"[ERROR] Failed to find user with the name: {party_member}."))
            else:
                await self.set_and_update_party_prop(
                    'Default:RawSquadAssignments_j',
                    {'RawSquadAssignments': [{'memberId': self.user.id, 'absoluteMemberIdx': 1}]}
                )
                await ctx.send('Hid everyone in the party. Use !unhide if you want to unhide everyone.'
                               '\nReminder: Crashing lobbies is a bannable offense which will result in a permanent ban.')
                print('Hid everyone in the party.')
        else:
            await ctx.send("Failed to hide everyone, as I'm not party leader")
            print(crayons.red("[ERROR] Failed to hide everyone as I don't have the required permissions."))
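    # NOTE (editor sketch): hiding works by rewriting the party's
    # 'Default:RawSquadAssignments_j' meta property so that only the bot keeps
    # a squad slot. The payload being patched above looks roughly like:
    #
    #     {'RawSquadAssignments': [{'memberId': '<account id>',
    #                               'absoluteMemberIdx': 1}]}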
    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the outfit of the client using the outfit's name with the ghost variant.",
        help="Sets the outfit of the client using the outfit's name with the ghost variant.\n"
             "Example: !ghost Meowscles"
    )
    async def ghost(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
        try:
            skin_variants = self.party.me.create_variants(progressive=2)
            cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(
                lang="en", searchLang="en", matchMethod="contains",
                name=content, backendType="AthenaCharacter"
            )
            await self.party.me.set_outfit(asset=cosmetic.id, variants=skin_variants)
            await ctx.send(f'Skin set to Ghost {cosmetic.name}!')
            print(f'Skin set to Ghost {cosmetic.name}.')
        except FortniteAPIAsync.exceptions.NotFound:
            await ctx.send(f"Failed to find a skin with the name: {content}.")
            print(f"Failed to find a skin with the name: {content}.")

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the outfit of the client using the outfit's name with the shadow variant.",
        help="Sets the outfit of the client using the outfit's name with the shadow variant.\n"
             "Example: !shadow Midas"
    )
    async def shadow(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None:
        try:
            skin_variants = self.party.me.create_variants(progressive=3)
            cosmetic = await self.fortnite_api.cosmetics.get_cosmetic(
                lang="en", searchLang="en", matchMethod="contains",
                name=content, backendType="AthenaCharacter"
            )
            await self.party.me.set_outfit(asset=cosmetic.id, variants=skin_variants)
            await ctx.send(f'Skin set to Shadow {cosmetic.name}!')
            print(f'Skin set to Shadow {cosmetic.name}.')
        except FortniteAPIAsync.exceptions.NotFound:
            await ctx.send(f"Failed to find a skin with the name: {content}.")
            print(f"Failed to find a skin with the name: {content}.")

    @commands.dm_only()
    @commands.command(
        description="[Client] Sets the client's kairos/PartyHub avatar.",
        help="Sets the client's kairos/PartyHub avatar.\n"
             "Example: !avatar stw_soldier_f"
    )
    async def avatar(self, ctx: fortnitepy.ext.commands.Context, kairos_cid: str) -> None:
        kairos_avatar = fortnitepy.Avatar(asset=kairos_cid)
        self.set_avatar(kairos_avatar)
        await ctx.send(f'Kairos avatar set to {kairos_cid}.')
        print(f'Kairos avatar set to {kairos_cid}.')

    @commands.dm_only()
    @commands.command(
        aliases=['clear'],
        description="[Client] Clears command prompt/terminal.",
        help="Clears command prompt/terminal.\n"
             "Example: !clean"
    )
    async def clean(self, ctx: fortnitepy.ext.commands.Context) -> None:
        # startswith('win') avoids matching 'darwin' (macOS), which also
        # contains the substring 'win'.
        os.system('cls' if sys.platform.startswith('win') else 'clear')
        print(crayons.cyan('Silverbot Made By mxnty.'))
        print(crayons.cyan('Discord server: https://discord.gg/7cn33ZhZBg - For support, questions, etc.'))
        await ctx.send('Command prompt/terminal cleared.')
        print('Command prompt/terminal cleared.')
"AthenaDance": self.party.me.set_emote } set_items = await self.fortnite_api.cosmetics.get_cosmetics( lang="en", searchLang="en", matchMethod="contains", set=content ) await ctx.send(f'Equipping all cosmetics from the {set_items[0].set} set.') print(f'Equipping all cosmetics from the {set_items[0].set} set.') for cosmetic in set_items: if cosmetic.backend_type.value in cosmetic_types: await cosmetic_types[cosmetic.backend_type.value](asset=cosmetic.id) await ctx.send(f'{cosmetic.short_description} set to {cosmetic.name}!') print(f'{cosmetic.short_description} set to {cosmetic.name}.') await ctx.send(f'Finished equipping all cosmetics from the {set_items[0].set} set.') print(f'Fishing equipping all cosmetics from the {set_items[0].set} set.') @commands.dm_only() @commands.command( description="[Cosmetic] Creates the variants list by the variants you set from skin name. " "If you want to include spaces in the skin name, you need to enclose it in \"'s.", help="Creates the variants list by the variants you set from skin name.\n" "Example: !style \"Skull Trooper\" clothing_color 1" ) async def style(self, ctx: fortnitepy.ext.commands.Context, cosmetic_name: str, variant_type: str, variant_int: str) -> None: # cosmetic_types = { # "AthenaCharacter": self.party.me.set_outfit, # "AthenaBackpack": self.party.me.set_backpack, # "AthenaPickaxe": self.party.me.set_pickaxe # } cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=cosmetic_name, backendType="AthenaCharacter" ) cosmetic_variants = self.party.me.create_variants( # item=cosmetic.backend_type.value, **{variant_type: int(variant_int) if variant_int.isdigit() else variant_int} ) # await cosmetic_types[cosmetic.backend_type.value]( await self.party.me.set_outfit( asset=cosmetic.id, variants=cosmetic_variants ) await ctx.send(f'Set variants of {cosmetic.id} to {variant_type} {variant_int}.') print(f'Set variants of {cosmetic.id} to {variant_type} {variant_int}.') @commands.dm_only() @commands.command( description="[Cosmetic] Equips all new non encrypted skins.", help="Equips all new non encrypted skins.\n" "Example: !new" ) async def new(self, ctx: fortnitepy.ext.commands.Context) -> None: new_skins = await self.fortnite_api.cosmetics.get_new_cosmetics() for new_skin in [new_cid for new_cid in new_skins if new_cid.split('/')[-1].lower().startswith('cid_')]: await self.party.me.set_outfit( asset=new_skin.split('/')[-1].split('.uasset')[0] ) await ctx.send(f"Skin set to {new_skin.split("/")[-1].split(".uasset")[0]}!") print(f"Skin set to: {new_skin.split("/")[-1].split(".uasset")[0]}!") await ctx.send(f'Finished equipping all new unencrypted skins.') print(f'Finished equipping all new unencrypted skins.') for new_emote in [new_eid for new_eid in new_skins if new_eid.split('/')[-1].lower().startswith('eid_')]: await self.party.me.set_emote( asset=new_emote.split('/')[-1].split('.uasset')[0] ) await ctx.send(f"Emote set to {new_emote.split("/")[-1].split(".uasset")[0]}!") print(f"Emote set to: {new_emote.split("/")[-1].split(".uasset")[0]}!") await ctx.send(f'Finished equipping all new unencrypted skins.') print(f'Finished equipping all new unencrypted skins.') @commands.dm_only() @commands.command( description="[Party] Sets the client to the \"Just Chattin'\" state.", help="Sets the client to the \"Just Chattin'\" state.\n" "Example: !justchattin" ) async def justchattin(self, ctx: fortnitepy.ext.commands.Context) -> None: self.default_party_member_config.cls = 
    @commands.dm_only()
    @commands.command(
        description="[Party] Sets the client to the \"Just Chattin'\" state.",
        help="Sets the client to the \"Just Chattin'\" state.\n"
             "Example: !justchattin"
    )
    async def justchattin(self, ctx: fortnitepy.ext.commands.Context) -> None:
        self.default_party_member_config.cls = fortnitepy.JustChattingClientPartyMember
        party_id = self.party.id
        await self.party.me.leave()
        await ctx.send('Set state to Just Chattin\'. Now attempting to rejoin party.'
                       '\nUse the command: !lobby to revert back to normal.')
        try:
            await self.join_party(party_id)
        except fortnitepy.errors.Forbidden:
            await ctx.send('Failed to join back as party is set to private.')
        except fortnitepy.errors.NotFound:
            await ctx.send('Party not found, are you sure Fortnite is open?')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Equips all skins currently in the item shop.",
        help="Equips all skins currently in the item shop.\n"
             "Example: !shop"
    )
    async def shop(self, ctx: fortnitepy.ext.commands.Context) -> None:
        store = await self.fetch_item_shop()
        await ctx.send("Equipping all skins in today's item shop.")
        print("Equipping all skins in today's item shop.")
        for item in store.special_featured_items + store.special_daily_items:
            for grant in item.grants:
                if grant['type'] == 'AthenaCharacter':
                    await self.party.me.set_outfit(asset=grant['asset'])
                    await ctx.send(f"Skin set to {item.display_names[0]}!")
                    print(f"Skin set to: {item.display_names[0]}!")
        await ctx.send('Finished equipping all skins in the item shop.')
        print('Finished equipping all skins in the item shop.')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Equips a random old default skin.",
        help="Equips a random old default skin.\n"
             "Example: !olddefault"
    )
    async def olddefault(self, ctx: fortnitepy.ext.commands.Context) -> None:
        random_default = py_random.choice(
            [cid_ for cid_ in dir(fortnitepy.DefaultCharactersChapter1) if not cid_.startswith('_')]
        )
        await self.party.me.set_outfit(asset=random_default)
        await ctx.send(f'Skin set to {random_default}!')
        print(f"Skin set to {random_default}.")

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the outfit of the client to Hatless Recon Expert.",
        help="Sets the outfit of the client to Hatless Recon Expert.\n"
             "Example: !hatlessrecon"
    )
    async def hatlessrecon(self, ctx: fortnitepy.ext.commands.Context) -> None:
        skin_variants = self.party.me.create_variants(parts=2)
        await self.party.me.set_outfit(
            asset='CID_022_Athena_Commando_F',
            variants=skin_variants
        )
        await ctx.send('Skin set to Hatless Recon Expert!')
        print('Skin set to Hatless Recon Expert.')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the outfit of the client to the max tier skin in the defined season.",
        help="Sets the outfit of the client to the max tier skin in the defined season.\n"
             "Example: !season 2"
    )
    async def season(self, ctx: fortnitepy.ext.commands.Context, br_season: int) -> None:
        max_tier_skins = {
            1: "CID_028_Athena_Commando_F",
            2: "CID_035_Athena_Commando_M_Medieval",
            3: "CID_084_Athena_Commando_M_Assassin",
            4: "CID_116_Athena_Commando_M_CarbideBlack",
            5: "CID_165_Athena_Commando_M_DarkViking",
            6: "CID_230_Athena_Commando_M_Werewolf",
            7: "CID_288_Athena_Commando_M_IceKing",
            8: "CID_352_Athena_Commando_F_Shiny",
            9: "CID_407_Athena_Commando_M_BattleSuit",
            10: "CID_484_Athena_Commando_M_KnightRemix",
            11: "CID_572_Athena_Commando_M_Viper",
            12: "CID_694_Athena_Commando_M_CatBurglar",
            13: "CID_767_Athena_Commando_F_BlackKnight"
        }
        await self.party.me.set_outfit(asset=max_tier_skins[br_season])
        await ctx.send(f'Skin set to {max_tier_skins[br_season]}!')
        print(f"Skin set to {max_tier_skins[br_season]}.")
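    # NOTE (editor sketch): the mapping above pairs each BR season (1-13) with
    # its max-tier outfit CID, so `!season 7` resolves to
    # CID_288_Athena_Commando_M_IceKing before calling set_outfit().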
Henchman skin.", help="Sets the outfit of the client to a random Henchman skin.\n" "Example: !henchman" ) async def henchman(self, ctx: fortnitepy.ext.commands.Context) -> None: random_henchman = py_random.choice( "CID_794_Athena_Commando_M_HenchmanBadShorts_D", "CID_NPC_Athena_Commando_F_HenchmanSpyDark", "CID_791_Athena_Commando_M_HenchmanGoodShorts_D", "CID_780_Athena_Commando_M_HenchmanBadShorts", "CID_NPC_Athena_Commando_M_HenchmanGood", "CID_692_Athena_Commando_M_HenchmanTough", "CID_707_Athena_Commando_M_HenchmanGood", "CID_792_Athena_Commando_M_HenchmanBadShorts_B", "CID_793_Athena_Commando_M_HenchmanBadShorts_C", "CID_NPC_Athena_Commando_M_HenchmanBad", "CID_790_Athena_Commando_M_HenchmanGoodShorts_C", "CID_779_Athena_Commando_M_HenchmanGoodShorts", "CID_NPC_Athena_Commando_F_RebirthDefault_Henchman", "CID_NPC_Athena_Commando_F_HenchmanSpyGood", "CID_706_Athena_Commando_M_HenchmanBad", "CID_789_Athena_Commando_M_HenchmanGoodShorts_B" ) await self.party.me.set_outfit( asset=random_henchman ) await ctx.send(f'Skin set to {random_henchman}!') print(f"Skin set to {random_henchman}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the emote of the client to Floss.", help="Sets the emote of the client to Floss.\n" "Example: !floss" ) async def floss(self, ctx: fortnitepy.ext.commands.Context) -> None: # // You caused this FunGames, you caused this... await self.party.me.set_emote( asset='EID_Floss' ) await ctx.send('Emote set to Floss!') print(f"Emote set to Floss.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to a random marauder skin.", help="Sets the outfit of the client to a random marauder skin.\n" "Example: !marauder" ) async def marauder(self, ctx: fortnitepy.ext.commands.Context) -> None: random_marauder = py_random.choice( "CID_NPC_Athena_Commando_M_MarauderHeavy", "CID_NPC_Athena_Commando_M_MarauderElite", "CID_NPC_Athena_Commando_M_MarauderGrunt" ) await self.party.me.set_outfit( asset=random_marauder ) await ctx.send(f'Skin set to {random_marauder}!') print(f"Skin set to {random_marauder}.") @commands.dm_only() @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Golden Brutus " "(shortcut for !enlightened CID_692_Athena_Commando_M_HenchmanTough 2 180).", help="Sets the outfit of the client to Golden Brutus.\n" "Example: !goldenbrutus" ) async def goldenbrutus(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_outfit( asset='CID_692_Athena_Commando_M_HenchmanTough', variants=self.party.me.create_variants(progressive=4), enlightenment=(2, 180) ) await ctx.send(f'Skin set to Golden Brutus.') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Golden Meowscles " "(shortcut for !enlightened CID_693_Athena_Commando_M_BuffCat 2 220).", help="Sets the outfit of the client to Golden Meowscles.\n" "Example: !goldenmeowscles" ) async def goldenmeowscles(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_outfit( asset='CID_693_Athena_Commando_M_BuffCat', variants=self.party.me.create_variants(progressive=4), enlightenment=(2, 220) ) await ctx.send(f'Skin set to Golden Meowscles.') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Golden Midas " "(shortcut for !enlightened CID_694_Athena_Commando_M_CatBurglar 2 140).", help="Sets the outfit of the client to Golden Peely.\n" "Example: !goldenmidas" ) async def goldenmidas(self, ctx: 
    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the outfit of the client to Golden Midas "
                    "(shortcut for !enlightened CID_694_Athena_Commando_M_CatBurglar 2 140).",
        help="Sets the outfit of the client to Golden Midas.\n"
             "Example: !goldenmidas"
    )
    async def goldenmidas(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.set_outfit(
            asset='CID_694_Athena_Commando_M_CatBurglar',
            variants=self.party.me.create_variants(progressive=4),
            enlightenment=(2, 140)
        )
        await ctx.send('Skin set to Golden Midas.')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the outfit of the client to Golden Skye "
                    "(shortcut for !enlightened CID_690_Athena_Commando_F_Photographer 2 300).",
        help="Sets the outfit of the client to Golden Skye.\n"
             "Example: !goldenskye"
    )
    async def goldenskye(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.set_outfit(
            asset='CID_690_Athena_Commando_F_Photographer',
            variants=self.party.me.create_variants(progressive=4),
            enlightenment=(2, 300)
        )
        await ctx.send('Skin set to Golden Skye.')

    @commands.dm_only()
    @commands.command(
        description="[Cosmetic] Sets the outfit of the client to Golden TNTina "
                    "(shortcut for !enlightened CID_691_Athena_Commando_F_TNTina 2 260).",
        help="Sets the outfit of the client to Golden TNTina.\n"
             "Example: !goldentntina"
    )
    async def goldentntina(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.party.me.set_outfit(
            asset='CID_691_Athena_Commando_F_TNTina',
            variants=self.party.me.create_variants(progressive=7),
            enlightenment=(2, 260)
        )
        await ctx.send('Skin set to Golden TNTina.')

    @commands.dm_only()
    @commands.command(
        description="[Client] Sends and sets the status to away.",
        help="Sends and sets the status to away.\n"
             "Example: !away"
    )
    async def away(self, ctx: fortnitepy.ext.commands.Context) -> None:
        await self.set_presence(
            status=self.status,
            away=fortnitepy.AwayStatus.AWAY
        )
        await ctx.send('Status set to away.')


if os.getenv("EMAIL") and os.getenv("PASSWORD"):
    bot = SilverBot(
        email=os.getenv("EMAIL"),
        password=os.getenv("PASSWORD")
    )
    bot.run()
else:
    sys.stderr.write('ERROR: Please enter email and password in the ".env" file.\n')
    sys.exit()
print(f"{member.display_name} has joined the lobby.") async def event_friend_message(self, message: fortnitepy.FriendMessage) -> None: print(f'{message.author.display_name}: {message.content}') await message.reply(self.welcome_message.replace('{DISPLAY_NAME}', message.author.display_name)) async def event_command_error(self, ctx: fortnitepy.ext.commands.Context, error: fortnitepy.ext.commands.CommandError) -> None: if isinstance(error, fortnitepy.ext.commands.errors.CommandNotFound): if isinstance(ctx.message, fortnitepy.FriendMessage): await ctx.send('Command not found, are you sure it exists?') else: pass elif isinstance(error, fortnitepy.ext.commands.errors.MissingRequiredArgument): await ctx.send('Failed to execute commands as there are missing requirements, please check usage.') elif isinstance(error, fortnitepy.ext.commands.errors.PrivateMessageOnly): pass else: raise error @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client using the outfits name.", help="Sets the outfit of the client using the outfits name.\n" "Example: !skin Nog Ops" ) async def skin(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaCharacter" ) await ctx.send(f'Skin set to {cosmetic.id}.') print(f"Set skin to: {cosmetic.id}.") await self.party.me.set_outfit(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a skin with the name: {content}.") print(f"Failed to find a skin with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the backpack of the client using the backpacks name.", help="Sets the backpack of the client using the backpacks name.\n" "Example: !backpack Black Shield" ) async def backpack(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaBackpack" ) await ctx.send(f'Backpack set to {cosmetic.id}.') print(f"Set backpack to: {cosmetic.id}.") await self.party.me.set_backpack(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a backpack with the name: {content}.") print(f"Failed to find a backpack with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the emote of the client using the emotes name.", help="Sets the emote of the client using the emotes name.\n" "Example: !emote Windmill Floss" ) async def emote(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaDance" ) await ctx.send(f'Emote set to {cosmetic.id}.') print(f"Set emote to: {cosmetic.id}.") await self.party.me.clear_emote() await self.party.me.set_emote(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find an emote with the name: {content}.") print(f"Failed to find an emote with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the pickaxe of the client using the pickaxe name.", help="Sets the pickaxe of the client using the pickaxe name.\n" "Example: !pickaxe Raider's Revenge" ) async def pickaxe(self, ctx: fortnitepy.ext.commands.Context, *, content: str) 
-> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaPickaxe" ) await ctx.send(f'Pickaxe set to {cosmetic.id}.') print(f"Set pickaxe to: {cosmetic.id}.") await self.party.me.set_pickaxe(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a pickaxe with the name: {content}.") print(f"Failed to find a pickaxe with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the pet (backpack) of the client using the pets name.", help="Sets the pet (backpack) of the client using the pets name.\n" "Example: !pet Bonesy" ) async def pet(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaPetCarrier" ) await ctx.send(f'Pet set to {cosmetic.id}.') print(f"Set pet to: {cosmetic.id}.") await self.party.me.set_pet(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a pet with the name: {content}.") print(f"Failed to find a pet with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the emoji of the client using the emojis name.", help="Sets the emoji of the client using the emojis name.\n" "Example: !emoji Snowball" ) async def emoji(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaEmoji" ) await ctx.send(f'Emoji set to {cosmetic.id}.') print(f"Set emoji to: {cosmetic.id}.") await self.party.me.set_emoji(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find an emoji with the name: {content}.") print(f"Failed to find an emoji with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the contrail of the client using the contrail name.", help="Sets the contrail of the client using the contrail name.\n" "Example: !contrail Holly And Divey" ) async def contrail(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaSkyDiveContrail" ) await ctx.send(f'Contrail set to {cosmetic.id}.') print(f"Set contrail to: {cosmetic.id}.") await self.party.me.set_contrail(asset=cosmetic.id) except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a contrail with the name: {content}.") print(f"Failed to find a contrail with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Purple Skull Trooper.", help="Sets the outfit of the client to Purple Skull Trooper.\n" "Example: !purpleskull" ) async def purpleskull(self, ctx: fortnitepy.ext.commands.Context) -> None: skin_variants = self.party.me.create_variants( clothing_color=1 ) await self.party.me.set_outfit( asset='CID_030_Athena_Commando_M_Halloween', variants=skin_variants ) await ctx.send('Skin set to Purple Skull Trooper!') print(f"Skin set to Purple Skull Trooper.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Pink Ghoul Trooper.", help="Sets the outfit of the client to Pink Ghoul Trooper.\n" "Example: 
!pinkghoul" ) async def pinkghoul(self, ctx: fortnitepy.ext.commands.Context) -> None: skin_variants = self.party.me.create_variants( material=3 ) await self.party.me.set_outfit( asset='CID_029_Athena_Commando_F_Halloween', variants=skin_variants ) await ctx.send('Skin set to Pink Ghoul Trooper!') print(f"Skin set to Pink Ghoul Trooper.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the backpack of the client to Purple Ghost Portal.", help="Sets the backpack of the client to Purple Ghost Portal.\n" "Example: !purpleportal" ) async def purpleportal(self, ctx: fortnitepy.ext.commands.Context) -> None: skin_variants = self.party.me.create_variants( item='AthenaBackpack', particle_config='Particle', particle=1 ) await self.party.me.set_backpack( asset='BID_105_GhostPortal', variants=skin_variants ) await ctx.send('Backpack set to Purple Ghost Portal!') print(f"Backpack set to Purple Ghost Portal.") @commands.dm_only() @commands.command( description="[Party] Sets the banner of the self.", help="Sets the banner of the self.\n" "Example: !banner BRSeason01 defaultcolor15 100" ) async def banner(self, ctx: fortnitepy.ext.commands.Context, icon: Optional[str] = None, colour: Optional[str] = None, banner_level: Optional[int] = None ) -> None: await self.party.me.set_banner(icon=icon, color=colour, season_level=banner_level) await ctx.send(f'Banner set to: {icon} with {colour} at level {banner_level}.') print(f"Banner set to: {icon} with {colour} at level {banner_level}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client using CID.", help="Sets the outfit of the client using CID.\n" "Example: !cid CID_047_Athena_Commando_F_HolidayReindeer" ) async def cid(self, ctx: fortnitepy.ext.commands.Context, character_id: str) -> None: await self.party.me.set_outfit( asset=character_id, variants=self.party.me.create_variants(profile_banner='ProfileBanner') ) await ctx.send(f'Skin set to {character_id}.') print(f'Skin set to {character_id}.') @commands.dm_only() @commands.command( description="[Cosmetic] Creates the variants list by the variants you set using VTID.", help="Creates the variants list by the variants you set using VTID.\n" "Example: !vtid VTID_052_Skull_Trooper_RedFlames" ) async def vtid(self, ctx: fortnitepy.ext.commands.Context, variant_token: str) -> None: variant_id = await self.set_vtid(variant_token) if variant_id[1].lower() == 'particle': skin_variants = self.party.me.create_variants(particle_config='Particle', particle=1) else: skin_variants = self.party.me.create_variants(**{variant_id[1].lower(): int(variant_id[2])}) await self.party.me.set_outfit(asset=variant_id[0], variants=skin_variants) print(f'Set variants of {variant_id[0]} to {variant_id[1]} {variant_id[2]}.') await ctx.send(f'Variants set to {variant_token}.\n' '(Warning: This feature is not supported, please use !variants)') @commands.dm_only() @commands.command( description="[Cosmetic] Creates the variants list by the variants you set.", help="Creates the variants list by the variants you set.\n" "Example: !variants CID_030_Athena_Commando_M_Halloween clothing_color 1" ) async def variants(self, ctx: fortnitepy.ext.commands.Context, cosmetic_id: str, variant_type: str, variant_int: str) -> None: if 'cid' in cosmetic_id.lower() and 'jersey_color' not in variant_type.lower(): skin_variants = self.party.me.create_variants( **{variant_type: int(variant_int) if variant_int.isdigit() else variant_int} ) await self.party.me.set_outfit( asset=cosmetic_id, 
variants=skin_variants ) elif 'cid' in cosmetic_id.lower() and 'jersey_color' in variant_type.lower(): cosmetic_variants = self.party.me.create_variants( pattern=0, numeric=69, **{variant_type: int(variant_int) if variant_int.isdigit() else variant_int} ) await self.party.me.set_outfit( asset=cosmetic_id, variants=cosmetic_variants ) elif 'bid' in cosmetic_id.lower(): cosmetic_variants = self.party.me.create_variants( item='AthenaBackpack', **{variant_type: int(variant_int) if variant_int.isdigit() else variant_int} ) await self.party.me.set_backpack( asset=cosmetic_id, variants=cosmetic_variants ) elif 'pickaxe_id' in cosmetic_id.lower(): cosmetic_variants = self.party.me.create_variants( item='AthenaPickaxe', **{variant_type: int(variant_int) if variant_int.isdigit() else variant_int} ) await self.party.me.set_pickaxe( asset=cosmetic_id, variants=cosmetic_variants ) await ctx.send(f'Set variants of {cosmetic_id} to {variant_type} {variant_int}.') print(f'Set variants of {cosmetic_id} to {variant_type} {variant_int}.') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Checkered Renegade.", help="Sets the outfit of the client to Checkered Renegade.\n" "Example: !checkeredrenegade" ) async def checkeredrenegade(self, ctx: fortnitepy.ext.commands.Context) -> None: skin_variants = self.party.me.create_variants( material=2 ) await self.party.me.set_outfit( asset='CID_028_Athena_Commando_F', variants=skin_variants ) await ctx.send('Skin set to Checkered Renegade!') print(f'Skin set to Checkered Renegade.') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Minty Elf.", help="Sets the outfit of the client to Minty Elf.\n" "Example: !mintyelf" ) async def mintyelf(self, ctx: fortnitepy.ext.commands.Context) -> None: skin_variants = self.party.me.create_variants( material=2 ) await self.party.me.set_outfit( asset='CID_051_Athena_Commando_M_HolidayElf', variants=skin_variants ) await ctx.send('Skin set to Minty Elf!') print(f'Skin set to Minty Elf.') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the emote of the client using EID.", help="Sets the emote of the client using EID.\n" "Example: !eid EID_Floss" ) async def eid(self, ctx: fortnitepy.ext.commands.Context, emote_id: str) -> None: await self.party.me.clear_emote() await self.party.me.set_emote( asset=emote_id ) await ctx.send(f'Emote set to {emote_id}!') @commands.dm_only() @commands.command( description="[Cosmetic] Clears/stops the emote currently playing.", help="Clears/stops the emote currently playing.\n" "Example: !stop" ) async def stop(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.clear_emote() await ctx.send('Stopped emoting.') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the backpack of the client using BID.", help="Sets the backpack of the client using BID.\n" "Example: !bid BID_023_Pinkbear" ) async def bid(self, ctx: fortnitepy.ext.commands.Context, backpack_id: str) -> None: await self.party.me.set_backpack( asset=backpack_id ) await ctx.send(f'Backbling set to {backpack_id}!') @commands.dm_only() @commands.command( aliases=['legacypickaxe'], description="[Cosmetic] Sets the pickaxe of the client using PICKAXE_ID", help="Sets the pickaxe of the client using PICKAXE_ID\n" "Example: !pickaxe_id Pickaxe_ID_073_Balloon" ) async def pickaxe_id(self, ctx: fortnitepy.ext.commands.Context, pickaxe_id_: str) -> None: await self.party.me.set_pickaxe( asset=pickaxe_id_ ) await 
ctx.send(f'Pickaxe set to {pickaxe_id_}') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the pet of the client using PetCarrier_.", help="Sets the pet of the client using PetCarrier_.\n" "Example: !pet_carrier PetCarrier_002_Chameleon" ) async def pet_carrier(self, ctx: fortnitepy.ext.commands.Context, pet_carrier_id: str) -> None: await self.party.me.set_pet( asset=pet_carrier_id ) await ctx.send(f'Pet set to {pet_carrier_id}!') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the emoji of the client using Emoji_.", help="Sets the emoji of the client using Emoji_.\n" "Example: !emoji_id Emoji_PeaceSign" ) async def emoji_id(self, ctx: fortnitepy.ext.commands.Context, emoji_: str) -> None: await self.party.me.clear_emote() await self.party.me.set_emoji( asset=emoji_ ) await ctx.send(f'Emoji set to {emoji_}!') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the contrail of the client using Trails_.", help="Sets the contrail of the client using Trails_.\n" "Example: !trails Trails_ID_075_Celestial" ) async def trails(self, ctx: fortnitepy.ext.commands.Context, trails_: str) -> None: await self.party.me.set_contrail( asset=trails_ ) await ctx.send(f'Contrail set to {trails_}!') @commands.dm_only() @commands.command( description="[Cosmetic] Sets pickaxe using PICKAXE_ID or display name & does 'Point it Out'. If no pickaxe is " "specified, only the emote will be played.", help="Sets pickaxe using PICKAXE_ID or display name & does 'Point it Out'. If no pickaxe is " "specified, only the emote will be played.\n" "Example: !point Pickaxe_ID_029_Assassin" ) async def point(self, ctx: fortnitepy.ext.commands.Context, *, content: Optional[str] = None) -> None: if content is None: await self.party.me.set_emote(asset='EID_IceKing') await ctx.send(f'Point it Out played.') elif 'pickaxe_id' in content.lower(): await self.party.me.set_pickaxe(asset=content) await self.party.me.set_emote(asset='EID_IceKing') await ctx.send(f'Pickaxe set to {content} & Point it Out played.') else: try: cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaPickaxe" ) await self.party.me.set_pickaxe(asset=cosmetic.id) await self.party.me.clear_emote() await self.party.me.set_emote(asset='EID_IceKing') await ctx.send(f'Pickaxe set to {content} & Point it Out played.') except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a pickaxe with the name: {content}") @commands.dm_only() @commands.command( description="[Party] Sets the readiness of the client to ready.", help="Sets the readiness of the client to ready.\n" "Example: !ready" ) async def ready(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_ready(fortnitepy.ReadyState.READY) await ctx.send('Ready!') @commands.dm_only() @commands.command( aliases=['sitin'], description="[Party] Sets the readiness of the client to unready.", help="Sets the readiness of the client to unready.\n" "Example: !unready" ) async def unready(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_ready(fortnitepy.ReadyState.NOT_READY) await ctx.send('Unready!') @commands.dm_only() @commands.command( description="[Party] Sets the readiness of the client to SittingOut.", help="Sets the readiness of the client to SittingOut.\n" "Example: !sitout" ) async def sitout(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_ready(fortnitepy.ReadyState.SITTING_OUT) 
await ctx.send('Sitting Out!') @commands.dm_only() @commands.command( description="[Party] Sets the battlepass info of the self.", help="Sets the battlepass info of the self.\n" "Example: !bp 100" ) async def bp(self, ctx: fortnitepy.ext.commands.Context, tier: int) -> None: await self.party.me.set_battlepass_info( has_purchased=True, level=tier, ) await ctx.send(f'Set battle pass tier to {tier}.') @commands.dm_only() @commands.command( description="[Party] Sets the level of the self.", help="Sets the level of the self.\n" "Example: !level 999" ) async def level(self, ctx: fortnitepy.ext.commands.Context, banner_level: int) -> None: await self.party.me.set_banner( season_level=banner_level ) await ctx.send(f'Set level to {banner_level}.') @commands.dm_only() @commands.command( description="[Party] Sends message to party chat with the given content.", help="Sends message to party chat with the given content.\n" "Example: !echo i cant fix the fucking public lobby bots" ) async def echo(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: await self.party.send(content) await ctx.send('Sent message to party chat.') @commands.dm_only() @commands.command( description="[Client] Sends and sets the status.", help="Sends and sets the status.\n" "Example: !status Presence Unknown" ) async def status(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: await self.set_presence(content) await ctx.send(f'Status set to {content}') print(f'Status set to {content}.') @commands.dm_only() @commands.command( description="[Party] Leaves the current party.", help="Leaves the current party.\n" "Example: !leave" ) async def leave(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_emote('EID_Wave') await self.party.me.leave() await ctx.send('Bye!') print(f'Left the party as I was requested.') @commands.dm_only() @commands.command( description="[Party] Kicks the inputted user.", help="Kicks the inputted user.\n" "Example: !kick Cxnyaa" ) async def kick(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: str) -> None: user = await self.fetch_user(epic_username) member = self.party.members.get(user.id) if member is None: await ctx.send("Failed to find that user, are you sure they're in the party?") else: try: await member.kick() await ctx.send(f"Kicked user: {member.display_name}.") print(f"Kicked user: {member.display_name}") except fortnitepy.errors.Forbidden: await ctx.send(f"Failed to kick {member.display_name}, as I'm not party leader.") print(crayons.red(f"[ERROR] " "Failed to kick member as I don't have the required permissions.")) @commands.dm_only() @commands.command( aliases=['unhide'], description="[Party] Promotes the defined user to party leader. If friend is left blank, " "the message author will be used.", help="Promotes the defined user to party leader. 
If friend is left blank, the message author will be used.\n" "Example: !promote mxnty" ) async def promote(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None: if epic_username is None: user = await self.fetch_user(ctx.author.display_name) member = self.party.members.get(user.id) else: user = await self.fetch_user(epic_username) member = self.party.members.get(user.id) if member is None: await ctx.send("Failed to find that user, are you sure they're in the party?") else: try: await member.promote() await ctx.send(f"Promoted user: {member.display_name}.") print(f"Promoted user: {member.display_name}") except fortnitepy.errors.Forbidden: await ctx.send(f"Failed to promote {member.display_name}, as I'm not party leader.") print(crayons.red(f"[ERROR] " "Failed to promote member as I don't have the required permissions.")) @commands.dm_only() @commands.command( description="[Party] Sets the lobbies selected playlist.", help="Sets the lobbies selected playlist.\n" "Example: !playlist_id Playlist_Tank_Solo" ) async def playlist_id(self, ctx: fortnitepy.ext.commands.Context, playlist_: str) -> None: try: await self.party.set_playlist(playlist=playlist_) await ctx.send(f'Gamemode set to {playlist_}') except fortnitepy.errors.Forbidden: await ctx.send(f"Failed to set gamemode to {playlist_}, as I'm not party leader.") print(crayons.red(f"[ERROR] " "Failed to set gamemode as I don't have the required permissions.")) @commands.dm_only() @commands.command( description="[Party] Sets the parties current privacy.", help="Sets the parties current privacy.\n" "Example: !privacy private" ) async def privacy(self, ctx: fortnitepy.ext.commands.Context, privacy_type: str) -> None: try: if privacy_type.lower() == 'public': await self.party.set_privacy(fortnitepy.PartyPrivacy.PUBLIC) elif privacy_type.lower() == 'private': await self.party.set_privacy(fortnitepy.PartyPrivacy.PRIVATE) elif privacy_type.lower() == 'friends': await self.party.set_privacy(fortnitepy.PartyPrivacy.FRIENDS) elif privacy_type.lower() == 'friends_allow_friends_of_friends': await self.party.set_privacy(fortnitepy.PartyPrivacy.FRIENDS_ALLOW_FRIENDS_OF_FRIENDS) elif privacy_type.lower() == 'private_allow_friends_of_friends': await self.party.set_privacy(fortnitepy.PartyPrivacy.PRIVATE_ALLOW_FRIENDS_OF_FRIENDS) await ctx.send(f'Party privacy set to {self.party.privacy}.') print(f'Party privacy set to {self.party.privacy}.') except fortnitepy.errors.Forbidden: await ctx.send(f"Failed to set party privacy to {privacy_type}, as I'm not party leader.") print(crayons.red(f"[ERROR] " "Failed to set party privacy as I don't have the required permissions.")) @commands.dm_only() @commands.command( description="[Cosmetic] Copies the cosmetic loadout of the defined user. If user is left blank, " "the message author will be used.", help="Copies the cosmetic loadout of the defined user. If user is left blank, the message author will be used."
"\nExample: !copy Terbau" ) async def copy(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None: if epic_username is None: member = self.party.members.get(ctx.author.id) else: user = await self.fetch_user(epic_username) member = self.party.members.get(user.id) await self.party.me.edit( functools.partial( fortnitepy.ClientPartyMember.set_outfit, asset=member.outfit, variants=member.outfit_variants ), functools.partial( fortnitepy.ClientPartyMember.set_backpack, asset=member.backpack, variants=member.backpack_variants ), functools.partial( fortnitepy.ClientPartyMember.set_pickaxe, asset=member.pickaxe, variants=member.pickaxe_variants ), functools.partial( fortnitepy.ClientPartyMember.set_banner, icon=member.banner[0], color=member.banner[1], season_level=member.banner[2] ), functools.partial( fortnitepy.ClientPartyMember.set_battlepass_info, has_purchased=True, level=member.battlepass_info[1] ) ) await self.party.me.set_emote(asset=member.emote) await ctx.send(f'Copied the loadout of {member.display_name}.') @commands.dm_only() @commands.command( description="[Cosmetic] Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.", help="Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.\n" "Example: !hologram" ) async def hologram(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_outfit( asset='CID_VIP_Athena_Commando_M_GalileoGondola_SG' ) await ctx.send('Skin set to Star Wars Hologram!') print(f'Skin set to Star Wars Hologram.') @commands.dm_only() @commands.command( description="[Cosmetic] Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.", help="Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.\n" "Example: !gift is a joke command." 
) async def gift(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.clear_emote() await self.party.me.set_emote( asset='EID_NeverGonna' ) await ctx.send('What did you think would happen?') @commands.dm_only() @commands.command( description="[Party] Sets the parties custom matchmaking code.", help="Sets the parties custom matchmaking code.\n" "Example: !matchmakingcode 1v1" ) async def matchmakingcode(self, ctx: fortnitepy.ext.commands.Context, *, custom_matchmaking_key: str) -> None: await self.party.set_custom_key( key=custom_matchmaking_key ) await ctx.send(f'Custom matchmaking code set to: {custom_matchmaking_key}') @commands.dm_only() @commands.command( description="[Cosmetic] Shortcut for equipping the emote EID_TourBus.", help="Shortcut for equipping the emote EID_TourBus.\n" "Example: !ponpon" ) async def ponpon(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_emote( asset='EID_TourBus' ) await ctx.send('Emote set to Ninja Style!') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the enlightened value of a skin " "(used for skins such as glitched Scratch or Golden Peely).", help="Sets the enlightened value of a skin.\n" "Example: !enlightened CID_701_Athena_Commando_M_BananaAgent 2 350" ) async def enlightened(self, ctx: fortnitepy.ext.commands.Context, cosmetic_id: str, br_season: int, skin_level: int) -> None: variant_types = { 1: self.party.me.create_variants(progressive=4), 2: self.party.me.create_variants(progressive=4), 3: self.party.me.create_variants(material=2) } if 'cid' in cosmetic_id.lower(): await self.party.me.set_outfit( asset=cosmetic_id, variants=variant_types[br_season] if br_season in variant_types else variant_types[2], enlightenment=(br_season, skin_level) ) await ctx.send(f'Skin set to {cosmetic_id} at level {skin_level} (for Season 1{br_season}).') elif 'bid' in cosmetic_id.lower(): await self.party.me.set_backpack( asset=cosmetic_id, variants=self.party.me.create_variants(progressive=2), enlightenment=(br_season, skin_level) ) await ctx.send(f'Backpack set to {cosmetic_id} at level {skin_level} (for Season 1{br_season}).') print(f'Enlightenment for {cosmetic_id} ' f'set to level {skin_level} (for Season 1{br_season}).') @commands.dm_only() @commands.command( description="[Cosmetic] Shortcut for equipping the skin CID_605_Athena_Commando_M_TourBus.", help="Shortcut for equipping the skin CID_605_Athena_Commando_M_TourBus.\n" "Example: !ninja" ) async def ninja(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_outfit( asset='CID_605_Athena_Commando_M_TourBus' ) await ctx.send('Skin set to Ninja!') print(f'Skin set to Ninja.') @commands.dm_only() @commands.command( description="[Cosmetic] Equips all very rare skins.", help="Equips all very rare skins.\n" "Example: !rareskins" ) async def rareskins(self, ctx: fortnitepy.ext.commands.Context) -> None: await ctx.send('Showing all rare skins now.') await self.party.me.set_outfit( asset='CID_030_Athena_Commando_M_Halloween', variants=self.party.me.create_variants(clothing_color=1) ) await ctx.send('Skin set to Purple Skull Trooper!') print(f"Skin set to Purple Skull Trooper.") await self.party.me.set_outfit( asset='CID_029_Athena_Commando_F_Halloween', variants=self.party.me.create_variants(material=3) ) await ctx.send('Skin set to Pink Ghoul Trooper!') print(f"Skin set to Pink Ghoul Trooper.") for rare_skin in ('CID_028_Athena_Commando_F', 'CID_017_Athena_Commando_M'): await self.party.me.set_outfit( asset=rare_skin ) await 
ctx.send(f'Skin set to {rare_skin}!') print(f"Skin set to: {rare_skin}!") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Golden Peely " "(shortcut for !enlightened CID_701_Athena_Commando_M_BananaAgent 2 350).", help="Sets the outfit of the client to Golden Peely.\n" "Example: !goldenpeely" ) async def goldenpeely(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_outfit( asset='CID_701_Athena_Commando_M_BananaAgent', variants=self.party.me.create_variants(progressive=4), enlightenment=(2, 350) ) await ctx.send(f'Skin set to Golden Peely.') @commands.dm_only() @commands.command( description="[Cosmetic] Randomly finds & equips a skin. Types currently include skin, backpack, emote & all. " "If type is left blank, a random skin will be equipped.", help="Randomly finds & equips a skin.\n" "Example: !random skin" ) async def random(self, ctx: fortnitepy.ext.commands.Context, cosmetic_type: str = 'skin') -> None: if cosmetic_type == 'skin': all_outfits = await self.fortnite_api.cosmetics.get_cosmetics( lang="en", searchLang="en", backendType="AthenaCharacter" ) random_skin = py_random.choice(all_outfits) await self.party.me.set_outfit( asset=random_skin.id, variants=self.party.me.create_variants(profile_banner='ProfileBanner') ) await ctx.send(f'Skin randomly set to {random_skin.name}.') print(f'Skin randomly set to {random_skin.name}.') elif cosmetic_type == 'backpack': all_backpacks = await self.fortnite_api.cosmetics.get_cosmetics( lang="en", searchLang="en", backendType="AthenaBackpack" ) random_backpack = py_random.choice(all_backpacks) await self.party.me.set_backpack( asset=random_backpack.id, variants=self.party.me.create_variants(profile_banner='ProfileBanner') ) await ctx.send(f'Backpack randomly set to {random_backpack.name}.') print(f'Backpack randomly set to {random_backpack.name}.') elif cosmetic_type == 'emote': all_emotes = await self.fortnite_api.cosmetics.get_cosmetics( lang="en", searchLang="en", backendType="AthenaDance" ) random_emote = py_random.choice(all_emotes) await self.party.me.set_emote( asset=random_emote.id ) await ctx.send(f'Emote randomly set to {random_emote.name}.') print(f'Emote randomly set to {random_emote.name}.') elif cosmetic_type == 'all': all_outfits = await self.fortnite_api.cosmetics.get_cosmetics( lang="en", searchLang="en", backendType="AthenaCharacter" ) all_backpacks = await self.fortnite_api.cosmetics.get_cosmetics( lang="en", searchLang="en", backendType="AthenaBackpack" ) all_emotes = await self.fortnite_api.cosmetics.get_cosmetics( lang="en", searchLang="en", backendType="AthenaDance" ) random_outfit = py_random.choice(all_outfits).id random_backpack = py_random.choice(all_backpacks).id random_emote = py_random.choice(all_emotes).id await self.party.me.set_outfit( asset=random_outfit ) await ctx.send(f'Skin randomly set to {random_outfit}.') await self.party.me.set_backpack( asset=random_backpack ) await ctx.send(f'Backpack randomly set to {random_backpack}.') await self.party.me.set_emote( asset=random_emote ) await ctx.send(f'Emote randomly set to {random_emote}.') @commands.dm_only() @commands.command( description="[Cosmetic] Clears the currently set backpack.", help="Clears the currently set backpack.\n" "Example: !nobackpack" ) async def nobackpack(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.clear_backpack() await ctx.send('Removed backpack.') @commands.dm_only() @commands.command( description="[Cosmetic] Clears the currently set pet.", 
help="Clears the currently set pet.\n" "Example: !nopet" ) async def nopet(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.clear_pet() await ctx.send('Removed pet.') @commands.dm_only() @commands.command( description="[Cosmetic] Clears the currently set contrail.", help="Clears the currently set contrail.\n" "Example: !nocontrail" ) async def nocontrail(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.clear_contrail() await ctx.send('Removed contrail.') @commands.dm_only() @commands.command( description="[Party] Sets the client to the \"In Match\" state. If the first argument is 'progressive', " "the players remaining will gradually drop to mimic a real game.", help="Sets the client to the \"In Match\" state.\n" "Example: !match 69 420" ) async def match(self, ctx: fortnitepy.ext.commands.Context, players: Union[str, int] = 0, match_time: int = 0) -> None: if players == 'progressive': match_time = datetime.datetime.utcnow() await self.party.me.set_in_match( players_left=100, started_at=match_time ) while (100 >= self.party.me.match_players_left > 0 and self.party.me.in_match()): await self.party.me.set_in_match( players_left=self.party.me.match_players_left - py_random.randint(3, 6), started_at=match_time ) else: await self.party.me.set_in_match( players_left=int(players), started_at=datetime.datetime.utcnow() - datetime.timedelta(minutes=match_time) ) await ctx.send(f'Set state to in-game in a match with {players} players.' '\nUse the command: !lobby to revert back to normal.') @commands.dm_only() @commands.command( description="[Party] Sets the client to normal pre-game lobby state.", help="Sets the client to normal pre-game lobby state.\n" "Example: !lobby" ) async def lobby(self, ctx: fortnitepy.ext.commands.Context) -> None: if self.default_party_member_config.cls == fortnitepy.JustChattingClientPartyMember: self.default_party_member_config.cls = fortnitepy.ClientPartyMember party_id = self.party.id await self.party.me.leave() await ctx.send('Removed state of Just Chattin\'. Now attempting to rejoin party.') try: await self.join_party(party_id) except fortnitepy.errors.Forbidden: await ctx.send('Failed to join back as party is set to private.') except fortnitepy.errors.NotFound: await ctx.send('Party not found, are you sure Fortnite is open?') await self.party.me.clear_in_match() await ctx.send('Set state to the pre-game lobby.') @commands.dm_only() @commands.command( description="[Party] Joins the party of the defined friend. 
If friend is left blank, " "the message author will be used.", help="Joins the party of the defined friend.\n" "Example: !join Terbau" ) async def join(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None: if epic_username is None: epic_friend = self.get_friend(ctx.author.id) else: user = await self.fetch_user(epic_username) if user is not None: epic_friend = self.get_friend(user.id) else: epic_friend = None await ctx.send(f'Failed to find user with the name: {epic_username}.') if isinstance(epic_friend, fortnitepy.Friend): try: await epic_friend.join_party() await ctx.send(f'Joined the party of {epic_friend.display_name}.') except fortnitepy.errors.Forbidden: await ctx.send('Failed to join party since it is private.') except fortnitepy.errors.PartyError: await ctx.send('Party not found, are you sure Fortnite is open?') else: await ctx.send('Cannot join party as the friend is not found.') @commands.dm_only() @commands.command( description="[Party] Sends the defined user a friend request.", help="Sends the defined user a friend request.\n" "Example: !friend Ninja" ) async def friend(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: str) -> None: user = await self.fetch_user(epic_username) if user is not None: await self.add_friend(user.id) await ctx.send(f'Sent/accepted friend request to/from {user.display_name}.') print(f'Sent/accepted friend request to/from {user.display_name}.') else: await ctx.send(f'Failed to find user with the name: {epic_username}.') print( crayons.red(f"[ERROR] Failed to find a user with the name {epic_username}.")) @commands.dm_only() @commands.command( description="[Party] Sets the lobbies selected playlist using playlist name.", help="Sets the lobbies selected playlist using playlist name.\n" "Example: !playlist Food Fight" ) async def playlist(self, ctx: fortnitepy.ext.commands.Context, *, playlist_name: str) -> None: try: scuffedapi_playlist_id = await self.fortnite_api.get_playlist(playlist_name) if scuffedapi_playlist_id is not None: await self.party.set_playlist(playlist=scuffedapi_playlist_id) await ctx.send(f'Playlist set to {scuffedapi_playlist_id}.') print(f'Playlist set to {scuffedapi_playlist_id}.') else: await ctx.send(f'Failed to find a playlist with the name: {playlist_name}.') print(crayons.red(f"[ERROR] " f"Failed to find a playlist with the name: {playlist_name}.")) except fortnitepy.errors.Forbidden: await ctx.send(f"Failed to set playlist to {playlist_name}, as I'm not party leader.") print(crayons.red(f"[ERROR] " "Failed to set playlist as I don't have the required permissions.")) @commands.dm_only() @commands.command( name="invite", description="[Party] Invites the defined friend to the party. 
If friend is left blank, " "the message author will be used.", help="Invites the defined friend to the party.\n" "Example: !invite Terbau" ) async def _invite(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None: if epic_username is None: epic_friend = self.get_friend(ctx.author.id) else: user = await self.fetch_user(epic_username) if user is not None: epic_friend = self.get_friend(user.id) else: epic_friend = None await ctx.send(f'Failed to find user with the name: {epic_username}.') print(crayons.red(f"[ERROR] " f"Failed to find user with the name: {epic_username}.")) if isinstance(epic_friend, fortnitepy.Friend): try: await epic_friend.invite() await ctx.send(f'Invited {epic_friend.display_name} to the party.') print(f"Invited {epic_friend.display_name} to the party.") except fortnitepy.errors.PartyError: await ctx.send('Failed to invite friend as they are either already in the party or it is full.') print(crayons.red(f"[ERROR] " "Failed to invite to party as friend is already either in party or it is full.")) else: await ctx.send('Cannot invite to party as the friend is not found.') print(crayons.red(f"[ERROR] " "Failed to invite to party as the friend is not found.")) @commands.dm_only() @commands.command( description="[Party] Hides everyone in the party except for the bot but if a player is specified, " "that specific player will be hidden.", help="Hides members of the party.\n" "Example: !hide" ) async def hide(self, ctx: fortnitepy.ext.commands.Context, party_member: Optional[str] = None) -> None: if self.party.me.leader: if party_member is not None: user = await self.fetch_user(party_member) member = self.party.members.get(user.id) if member is not None: raw_squad_assignments = self.party.meta.get_prop( 'Default:RawSquadAssignments_j' )["RawSquadAssignments"] for player in raw_squad_assignments: if player['memberId'] == member.id: raw_squad_assignments.remove(player) await self.set_and_update_party_prop( 'Default:RawSquadAssignments_j', { 'RawSquadAssignments': raw_squad_assignments } ) else: await ctx.send(f'Failed to find user with the name: {party_member}.') print(crayons.red(f"[ERROR] " f"Failed to find user with the name: {party_member}.")) else: await self.set_and_update_party_prop( 'Default:RawSquadAssignments_j', { 'RawSquadAssignments': [{'memberId': self.user.id, 'absoluteMemberIdx': 1}] } ) await ctx.send('Hid everyone in the party. Use !unhide if you want to unhide everyone.'
'\nReminder: Crashing lobbies is a bannable offense which will result in a permanent ban.') print(f'Hid everyone in the party.') else: await ctx.send("Failed to hide everyone, as I'm not party leader") print(crayons.red(f"[ERROR] " "Failed to hide everyone as I don't have the required permissions.")) @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client using the outfits name with the ghost variant.", help="Sets the outfit of the client using the outfits name with the ghost variant.\n" "Example: !ghost Meowscles" ) async def ghost(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: skin_variants = self.party.me.create_variants( progressive=2 ) cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaCharacter" ) await self.party.me.set_outfit( asset=cosmetic.id, variants=skin_variants ) await ctx.send(f'Skin set to Ghost {cosmetic.name}!') print(f'Skin set to Ghost {cosmetic.name}.') except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a skin with the name: {content}.") print(f"Failed to find a skin with the name: {content}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client using the outfits name with the shadow variant.", help="Sets the outfit of the client using the outfits name with the shadow variant.\n" "Example: !shadow Midas" ) async def shadow(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: try: skin_variants = self.party.me.create_variants( progressive=3 ) cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=content, backendType="AthenaCharacter" ) await self.party.me.set_outfit( asset=cosmetic.id, variants=skin_variants ) await ctx.send(f'Skin set to Shadow {cosmetic.name}!') print(f'Skin set to Shadow {cosmetic.name}.') except FortniteAPIAsync.exceptions.NotFound: await ctx.send(f"Failed to find a skin with the name: {content}.") print(f"Failed to find a skin with the name: {content}.") @commands.dm_only() @commands.command( description="[Client] Sets the clients kairos/PartyHub avatar.", help="Sets the clients kairos/PartyHub avatar.\n" "Example: !avatar stw_soldier_f" ) async def avatar(self, ctx: fortnitepy.ext.commands.Context, kairos_cid: str) -> None: kairos_avatar = fortnitepy.Avatar( asset=kairos_cid ) self.set_avatar(kairos_avatar) await ctx.send(f'Kairos avatar set to {kairos_cid}.') print(f'Kairos avatar set to {kairos_cid}.') @commands.dm_only() @commands.command( aliases=['clear'], description="[Client] Clears command prompt/terminal.", help="Clears command prompt/terminal.\n" "Example: !clean" ) async def clean(self, ctx: fortnitepy.ext.commands.Context) -> None: os.system('cls' if sys.platform.startswith('win') else 'clear') print(crayons.cyan(f'Silverbot Made By mxnty.')) print(crayons.cyan( f'Discord server: https://discord.gg/7cn33ZhZBg - For support, questions, etc.')) await ctx.send('Command prompt/terminal cleared.') print(f'Command prompt/terminal cleared.') @commands.dm_only() @commands.command( name="set", description="[Cosmetic] Equips all cosmetics from a set.", help="Equips all cosmetics from a set.\n" "Example: !set Fort Knights" ) async def _set(self, ctx: fortnitepy.ext.commands.Context, *, content: str) -> None: cosmetic_types = { "AthenaBackpack": self.party.me.set_backpack, "AthenaCharacter": self.party.me.set_outfit, "AthenaEmoji": self.party.me.set_emoji, 
"AthenaDance": self.party.me.set_emote } set_items = await self.fortnite_api.cosmetics.get_cosmetics( lang="en", searchLang="en", matchMethod="contains", set=content ) await ctx.send(f'Equipping all cosmetics from the {set_items[0].set} set.') print(f'Equipping all cosmetics from the {set_items[0].set} set.') for cosmetic in set_items: if cosmetic.backend_type.value in cosmetic_types: await cosmetic_types[cosmetic.backend_type.value](asset=cosmetic.id) await ctx.send(f'{cosmetic.short_description} set to {cosmetic.name}!') print(f'{cosmetic.short_description} set to {cosmetic.name}.') await ctx.send(f'Finished equipping all cosmetics from the {set_items[0].set} set.') print(f'Fishing equipping all cosmetics from the {set_items[0].set} set.') @commands.dm_only() @commands.command( description="[Cosmetic] Creates the variants list by the variants you set from skin name. " "If you want to include spaces in the skin name, you need to enclose it in \"'s.", help="Creates the variants list by the variants you set from skin name.\n" "Example: !style \"Skull Trooper\" clothing_color 1" ) async def style(self, ctx: fortnitepy.ext.commands.Context, cosmetic_name: str, variant_type: str, variant_int: str) -> None: # cosmetic_types = { # "AthenaCharacter": self.party.me.set_outfit, # "AthenaBackpack": self.party.me.set_backpack, # "AthenaPickaxe": self.party.me.set_pickaxe # } cosmetic = await self.fortnite_api.cosmetics.get_cosmetic( lang="en", searchLang="en", matchMethod="contains", name=cosmetic_name, backendType="AthenaCharacter" ) cosmetic_variants = self.party.me.create_variants( # item=cosmetic.backend_type.value, **{variant_type: int(variant_int) if variant_int.isdigit() else variant_int} ) # await cosmetic_types[cosmetic.backend_type.value]( await self.party.me.set_outfit( asset=cosmetic.id, variants=cosmetic_variants ) await ctx.send(f'Set variants of {cosmetic.id} to {variant_type} {variant_int}.') print(f'Set variants of {cosmetic.id} to {variant_type} {variant_int}.') @commands.dm_only() @commands.command( description="[Cosmetic] Equips all new non encrypted skins.", help="Equips all new non encrypted skins.\n" "Example: !new" ) async def new(self, ctx: fortnitepy.ext.commands.Context) -> None: new_skins = await self.fortnite_api.cosmetics.get_new_cosmetics() for new_skin in [new_cid for new_cid in new_skins if new_cid.split('/')[-1].lower().startswith('cid_')]: await self.party.me.set_outfit( asset=new_skin.split('/')[-1].split('.uasset')[0] ) await ctx.send(f"Skin set to {new_skin.split('/')[-1].split('.uasset')[0]}!") print(f"Skin set to: {new_skin.split('/')[-1].split('.uasset')[0]}!") await ctx.send(f'Finished equipping all new unencrypted skins.') print(f'Finished equipping all new unencrypted skins.') for new_emote in [new_eid for new_eid in new_skins if new_eid.split('/')[-1].lower().startswith('eid_')]: await self.party.me.set_emote( asset=new_emote.split('/')[-1].split('.uasset')[0] ) await ctx.send(f"Emote set to {new_emote.split('/')[-1].split('.uasset')[0]}!") print(f"Emote set to: {new_emote.split('/')[-1].split('.uasset')[0]}!") await ctx.send(f'Finished equipping all new unencrypted skins.') print(f'Finished equipping all new unencrypted skins.') @commands.dm_only() @commands.command( description="[Party] Sets the client to the \"Just Chattin'\" state.", help="Sets the client to the \"Just Chattin'\" state.\n" "Example: !justchattin" ) async def justchattin(self, ctx: fortnitepy.ext.commands.Context) -> None: self.default_party_member_config.cls = 
fortnitepy.JustChattingClientPartyMember party_id = self.party.id await self.party.me.leave() await ctx.send('Set state to Just Chattin\'. Now attempting to rejoin party.' '\nUse the command: !lobby to revert back to normal.') try: await self.join_party(party_id) except fortnitepy.errors.Forbidden: await ctx.send('Failed to join back as party is set to private.') except fortnitepy.errors.NotFound: await ctx.send('Party not found, are you sure Fortnite is open?') @commands.dm_only() @commands.command( description="[Cosmetic] Equips all skins currently in the item shop.", help="Equips all skins currently in the item shop.\n" "Example: !shop" ) async def shop(self, ctx: fortnitepy.ext.commands.Context) -> None: store = await self.fetch_item_shop() await ctx.send(f"Equipping all skins in today's item shop.") print(f"Equipping all skins in today's item shop.") for item in store.featured_items + \ store.daily_items + \ store.special_featured_items + \ store.special_daily_items: for grant in item.grants: if grant['type'] == 'AthenaCharacter': await self.party.me.set_outfit( asset=grant['asset'] ) await ctx.send(f"Skin set to {item.display_names[0]}!") print(f"Skin set to: {item.display_names[0]}!") await ctx.send(f'Finished equipping all skins in the item shop.') print(f'Finished equipping all skins in the item shop.') @commands.dm_only() @commands.command( description="[Cosmetic] Equips a random old default skin.", help="Equips a random old default skin.\n" "Example: !olddefault" ) async def olddefault(self, ctx: fortnitepy.ext.commands.Context) -> None: random_default = py_random.choice( [cid_ for cid_ in dir(fortnitepy.DefaultCharactersChapter1) if not cid_.startswith('_')] ) await self.party.me.set_outfit( asset=random_default ) await ctx.send(f'Skin set to {random_default}!') print(f"Skin set to {random_default}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Hatless Recon Expert.", help="Sets the outfit of the client to Hatless Recon Expert.\n" "Example: !hatlessrecon" ) async def hatlessrecon(self, ctx: fortnitepy.ext.commands.Context) -> None: skin_variants = self.party.me.create_variants( parts=2 ) await self.party.me.set_outfit( asset='CID_022_Athena_Commando_F', variants=skin_variants ) await ctx.send('Skin set to Hatless Recon Expert!') print(f'Skin set to Hatless Recon Expert.') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to the max tier skin in the defined season.", help="Sets the outfit of the client to the max tier skin in the defined season.\n" "Example: !season 2" ) async def season(self, ctx: fortnitepy.ext.commands.Context, br_season: int) -> None: max_tier_skins = { 1: "CID_028_Athena_Commando_F", 2: "CID_035_Athena_Commando_M_Medieval", 3: "CID_084_Athena_Commando_M_Assassin", 4: "CID_116_Athena_Commando_M_CarbideBlack", 5: "CID_165_Athena_Commando_M_DarkViking", 6: "CID_230_Athena_Commando_M_Werewolf", 7: "CID_288_Athena_Commando_M_IceKing", 8: "CID_352_Athena_Commando_F_Shiny", 9: "CID_407_Athena_Commando_M_BattleSuit", 10: "CID_484_Athena_Commando_M_KnightRemix", 11: "CID_572_Athena_Commando_M_Viper", 12: "CID_694_Athena_Commando_M_CatBurglar", 13: "CID_767_Athena_Commando_F_BlackKnight" } await self.party.me.set_outfit(asset=max_tier_skins[br_season]) await ctx.send(f'Skin set to {max_tier_skins[br_season]}!') print(f"Skin set to {max_tier_skins[br_season]}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to a random 
Henchman skin.", help="Sets the outfit of the client to a random Henchman skin.\n" "Example: !henchman" ) async def henchman(self, ctx: fortnitepy.ext.commands.Context) -> None: random_henchman = py_random.choice( "CID_794_Athena_Commando_M_HenchmanBadShorts_D", "CID_NPC_Athena_Commando_F_HenchmanSpyDark", "CID_791_Athena_Commando_M_HenchmanGoodShorts_D", "CID_780_Athena_Commando_M_HenchmanBadShorts", "CID_NPC_Athena_Commando_M_HenchmanGood", "CID_692_Athena_Commando_M_HenchmanTough", "CID_707_Athena_Commando_M_HenchmanGood", "CID_792_Athena_Commando_M_HenchmanBadShorts_B", "CID_793_Athena_Commando_M_HenchmanBadShorts_C", "CID_NPC_Athena_Commando_M_HenchmanBad", "CID_790_Athena_Commando_M_HenchmanGoodShorts_C", "CID_779_Athena_Commando_M_HenchmanGoodShorts", "CID_NPC_Athena_Commando_F_RebirthDefault_Henchman", "CID_NPC_Athena_Commando_F_HenchmanSpyGood", "CID_706_Athena_Commando_M_HenchmanBad", "CID_789_Athena_Commando_M_HenchmanGoodShorts_B" ) await self.party.me.set_outfit( asset=random_henchman ) await ctx.send(f'Skin set to {random_henchman}!') print(f"Skin set to {random_henchman}.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the emote of the client to Floss.", help="Sets the emote of the client to Floss.\n" "Example: !floss" ) async def floss(self, ctx: fortnitepy.ext.commands.Context) -> None: # // You caused this FunGames, you caused this... await self.party.me.set_emote( asset='EID_Floss' ) await ctx.send('Emote set to Floss!') print(f"Emote set to Floss.") @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to a random marauder skin.", help="Sets the outfit of the client to a random marauder skin.\n" "Example: !marauder" ) async def marauder(self, ctx: fortnitepy.ext.commands.Context) -> None: random_marauder = py_random.choice( "CID_NPC_Athena_Commando_M_MarauderHeavy", "CID_NPC_Athena_Commando_M_MarauderElite", "CID_NPC_Athena_Commando_M_MarauderGrunt" ) await self.party.me.set_outfit( asset=random_marauder ) await ctx.send(f'Skin set to {random_marauder}!') print(f"Skin set to {random_marauder}.") @commands.dm_only() @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Golden Brutus " "(shortcut for !enlightened CID_692_Athena_Commando_M_HenchmanTough 2 180).", help="Sets the outfit of the client to Golden Brutus.\n" "Example: !goldenbrutus" ) async def goldenbrutus(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_outfit( asset='CID_692_Athena_Commando_M_HenchmanTough', variants=self.party.me.create_variants(progressive=4), enlightenment=(2, 180) ) await ctx.send(f'Skin set to Golden Brutus.') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Golden Meowscles " "(shortcut for !enlightened CID_693_Athena_Commando_M_BuffCat 2 220).", help="Sets the outfit of the client to Golden Meowscles.\n" "Example: !goldenmeowscles" ) async def goldenmeowscles(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_outfit( asset='CID_693_Athena_Commando_M_BuffCat', variants=self.party.me.create_variants(progressive=4), enlightenment=(2, 220) ) await ctx.send(f'Skin set to Golden Meowscles.') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Golden Midas " "(shortcut for !enlightened CID_694_Athena_Commando_M_CatBurglar 2 140).", help="Sets the outfit of the client to Golden Peely.\n" "Example: !goldenmidas" ) async def goldenmidas(self, ctx: 
fortnitepy.ext.commands.Context) -> None: await self.party.me.set_outfit( asset='CID_694_Athena_Commando_M_CatBurglar', variants=self.party.me.create_variants(progressive=4), enlightenment=(2, 140) ) await ctx.send(f'Skin set to Golden Midas.') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Golden Skye " "(shortcut for !enlightened CID_690_Athena_Commando_F_Photographer 2 300).", help="Sets the outfit of the client to Golden Skye.\n" "Example: !goldenskye" ) async def goldenskye(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_outfit( asset='CID_690_Athena_Commando_F_Photographer', variants=self.party.me.create_variants(progressive=4), enlightenment=(2, 300) ) await ctx.send(f'Skin set to Golden Skye.') @commands.dm_only() @commands.command( description="[Cosmetic] Sets the outfit of the client to Golden TNTina " "(shortcut for !enlightened CID_691_Athena_Commando_F_TNTina 2 260).", help="Sets the outfit of the client to Golden TNTina.\n" "Example: !goldentntina" ) async def goldentntina(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.party.me.set_outfit( asset='CID_691_Athena_Commando_F_TNTina', variants=self.party.me.create_variants(progressive=7), enlightenment=(2, 260) ) await ctx.send(f'Skin set to Golden TNTina.') @commands.dm_only() @commands.command( description="[Client] Sends and sets the status to away.", help="Sends and sets the status to away.\n" "Example: !away" ) async def away(self, ctx: fortnitepy.ext.commands.Context) -> None: await self.set_presence( status=self.status, away=fortnitepy.AwayStatus.AWAY ) await ctx.send('Status set to away.') if os.getenv("EMAIL") and os.getenv("PASSWORD"): bot = SilverBot( email=os.getenv("EMAIL"), password=os.getenv("PASSWORD") ) bot.run() else: sys.stderr.write("ERROR: Please enter email and password in the \".env\" file.\n") sys.exit()
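# The credential check above only sees EMAIL and PASSWORD if they are already in the
# environment; the error message points at a ".env" file, which Python does not read
# by itself. A minimal sketch, assuming the python-dotenv package is the loader
# (placeholder values below, not real credentials):
#
#     # .env
#     EMAIL=bot-account@example.com
#     PASSWORD=your-password-here
#
#     # near the top of the file, before the os.getenv() calls:
#     from dotenv import load_dotenv
#     load_dotenv()  # copies key=value pairs from .env into os.environ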
import json import psycopg2 print("Populating points") with open('fixtures/cols-and-passes.json') as f: points = json.loads(f.read()) values = [] for point in points: values.append(f"('{point['name']}', '{point['lat']}', '{point['lng']}', (SELECT id FROM regions WHERE name='{point['region']}')), ") with psycopg2.connect(database='bealach') as conn: cur = conn.cursor() concat_values = ''.join(values) query = f'INSERT INTO points (name, lat, lon, region_id) VALUES {concat_values.strip()[:-1]}' cur.execute(query) print("Successfully populated points")
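# The loader above splices JSON values straight into the SQL string, so a point name
# containing an apostrophe (not unusual for cols and passes) would break the INSERT.
# A minimal parameterized sketch of the same load, assuming the same fixture file and
# schema, using psycopg2's execute_values helper:
import json
import psycopg2
from psycopg2.extras import execute_values

with open('fixtures/cols-and-passes.json') as f:
    points = json.load(f)

# One tuple per row; the region name feeds the subquery in the row template.
rows = [(p['name'], p['lat'], p['lng'], p['region']) for p in points]

with psycopg2.connect(database='bealach') as conn:
    cur = conn.cursor()
    execute_values(
        cur,
        'INSERT INTO points (name, lat, lon, region_id) VALUES %s',
        rows,
        template="(%s, %s, %s, (SELECT id FROM regions WHERE name=%s))",
    )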
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # isort:skip_file # pylint: disable=invalid-name, no-self-use, too-many-public-methods, too-many-arguments """Unit tests for Superset""" import json from io import BytesIO from unittest import mock from zipfile import is_zipfile, ZipFile import prison import pytest import yaml from sqlalchemy.sql import func from superset import db, security_manager from superset.connectors.sqla.models import SqlaTable from superset.models.core import Database from superset.models.reports import ReportSchedule, ReportScheduleType from superset.utils.core import get_example_database, get_main_database from tests.base_tests import SupersetTestCase from tests.fixtures.certificates import ssl_certificate from tests.fixtures.energy_dashboard import load_energy_table_with_slice from tests.fixtures.importexport import ( database_config, dataset_config, database_metadata_config, dataset_metadata_config, ) from tests.fixtures.unicode_dashboard import load_unicode_dashboard_with_position from tests.test_app import app class TestDatabaseApi(SupersetTestCase): def insert_database( self, database_name: str, sqlalchemy_uri: str, extra: str = "", encrypted_extra: str = "", server_cert: str = "", expose_in_sqllab: bool = False, ) -> Database: database = Database( database_name=database_name, sqlalchemy_uri=sqlalchemy_uri, extra=extra, encrypted_extra=encrypted_extra, server_cert=server_cert, expose_in_sqllab=expose_in_sqllab, ) db.session.add(database) db.session.commit() return database @pytest.fixture() def create_database_with_report(self): with self.create_app().app_context(): example_db = get_example_database() database = self.insert_database( "database_with_report", example_db.sqlalchemy_uri_decrypted, expose_in_sqllab=True, ) report_schedule = ReportSchedule( type=ReportScheduleType.ALERT, name="report_with_database", crontab="* * * * *", database=database, ) db.session.add(report_schedule) db.session.commit() yield database # rollback changes db.session.delete(report_schedule) db.session.delete(database) db.session.commit() def create_database_import(self): buf = BytesIO() with ZipFile(buf, "w") as bundle: with bundle.open("database_export/metadata.yaml", "w") as fp: fp.write(yaml.safe_dump(database_metadata_config).encode()) with bundle.open( "database_export/databases/imported_database.yaml", "w" ) as fp: fp.write(yaml.safe_dump(database_config).encode()) with bundle.open( "database_export/datasets/imported_dataset.yaml", "w" ) as fp: fp.write(yaml.safe_dump(dataset_config).encode()) buf.seek(0) return buf def test_get_items(self): """ Database API: Test get items """ self.login(username="admin") uri = "api/v1/database/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 200) response = json.loads(rv.data.decode("utf-8")) expected_columns = 
[ "allow_csv_upload", "allow_ctas", "allow_cvas", "allow_dml", "allow_multi_schema_metadata_fetch", "allow_run_async", "allows_cost_estimate", "allows_subquery", "allows_virtual_table_explore", "backend", "changed_on", "changed_on_delta_humanized", "created_by", "database_name", "explore_database_id", "expose_in_sqllab", "force_ctas_schema", "function_names", "id", ] self.assertGreater(response["count"], 0) self.assertEqual(list(response["result"][0].keys()), expected_columns) def test_get_items_filter(self): """ Database API: Test get items with filter """ example_db = get_example_database() test_database = self.insert_database( "test-database", example_db.sqlalchemy_uri_decrypted, expose_in_sqllab=True ) dbs = db.session.query(Database).filter_by(expose_in_sqllab=True).all() self.login(username="admin") arguments = { "keys": ["none"], "filters": [{"col": "expose_in_sqllab", "opr": "eq", "value": True}], "order_columns": "database_name", "order_direction": "asc", "page": 0, "page_size": -1, } uri = f"api/v1/database/?q={prison.dumps(arguments)}" rv = self.client.get(uri) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(rv.status_code, 200) self.assertEqual(response["count"], len(dbs)) # Cleanup db.session.delete(test_database) db.session.commit() def test_get_items_not_allowed(self): """ Database API: Test get items not allowed """ self.login(username="gamma") uri = "api/v1/database/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 200) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(response["count"], 0) def test_create_database(self): """ Database API: Test create """ extra = { "metadata_params": {}, "engine_params": {}, "metadata_cache_timeout": {}, "schemas_allowed_for_csv_upload": [], } self.login(username="admin") example_db = get_example_database() if example_db.backend == "sqlite": return database_data = { "database_name": "test-create-database", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, "server_cert": ssl_certificate, "extra": json.dumps(extra), } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(rv.status_code, 201) # Cleanup model = db.session.query(Database).get(response.get("id")) db.session.delete(model) db.session.commit() def test_create_database_server_cert_validate(self): """ Database API: Test create server cert validation """ example_db = get_example_database() if example_db.backend == "sqlite": return self.login(username="admin") database_data = { "database_name": "test-create-database-invalid-cert", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, "server_cert": "INVALID CERT", } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = {"message": {"server_cert": ["Invalid certificate"]}} self.assertEqual(rv.status_code, 400) self.assertEqual(response, expected_response) def test_create_database_json_validate(self): """ Database API: Test create encrypted extra and extra validation """ example_db = get_example_database() if example_db.backend == "sqlite": return self.login(username="admin") database_data = { "database_name": "test-create-database-invalid-json", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, "encrypted_extra": '{"A": "a", "B", "C"}', "extra": '["A": "a", "B", "C"]', } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = { "message": { 
"encrypted_extra": [ "Field cannot be decoded by JSON. Expecting ':' " "delimiter: line 1 column 15 (char 14)" ], "extra": [ "Field cannot be decoded by JSON. Expecting ','" " delimiter: line 1 column 5 (char 4)" ], } } self.assertEqual(rv.status_code, 400) self.assertEqual(response, expected_response) def test_create_database_extra_metadata_validate(self): """ Database API: Test create extra metadata_params validation """ example_db = get_example_database() if example_db.backend == "sqlite": return extra = { "metadata_params": {"wrong_param": "some_value"}, "engine_params": {}, "metadata_cache_timeout": {}, "schemas_allowed_for_csv_upload": [], } self.login(username="admin") database_data = { "database_name": "test-create-database-invalid-extra", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, "extra": json.dumps(extra), } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = { "message": { "extra": [ "The metadata_params in Extra field is not configured correctly." " The key wrong_param is invalid." ] } } self.assertEqual(rv.status_code, 400) self.assertEqual(response, expected_response) def test_create_database_unique_validate(self): """ Database API: Test create database_name already exists """ example_db = get_example_database() if example_db.backend == "sqlite": return self.login(username="admin") database_data = { "database_name": "examples", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = { "message": {"database_name": "A database with the same name already exists"} } self.assertEqual(rv.status_code, 422) self.assertEqual(response, expected_response) def test_create_database_uri_validate(self): """ Database API: Test create fail validate sqlalchemy uri """ self.login(username="admin") database_data = { "database_name": "test-database-invalid-uri", "sqlalchemy_uri": "wrong_uri", } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(rv.status_code, 400) self.assertIn( "Invalid connection string", response["message"]["sqlalchemy_uri"][0], ) @mock.patch( "superset.views.core.app.config", {**app.config, "PREVENT_UNSAFE_DB_CONNECTIONS": True}, ) def test_create_database_fail_sqllite(self): """ Database API: Test create fail with sqllite """ database_data = { "database_name": "test-create-sqlite-database", "sqlalchemy_uri": "sqlite:////some.db", } uri = "api/v1/database/" self.login(username="admin") response = self.client.post(uri, json=database_data) response_data = json.loads(response.data.decode("utf-8")) expected_response = { "message": { "sqlalchemy_uri": [ "SQLite database cannot be used as a data source " "for security reasons." 
] } } self.assertEqual(response_data, expected_response) self.assertEqual(response.status_code, 400) def test_create_database_conn_fail(self): """ Database API: Test create fails connection """ example_db = get_example_database() if example_db.backend in ("sqlite", "hive", "presto"): return example_db.password = "wrong_password" database_data = { "database_name": "test-create-database-wrong-password", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, } uri = "api/v1/database/" self.login(username="admin") response = self.client.post(uri, json=database_data) response_data = json.loads(response.data.decode("utf-8")) expected_response = {"message": "Could not connect to database."} self.assertEqual(response.status_code, 422) self.assertEqual(response_data, expected_response) def test_update_database(self): """ Database API: Test update """ example_db = get_example_database() test_database = self.insert_database( "test-database", example_db.sqlalchemy_uri_decrypted ) self.login(username="admin") database_data = {"database_name": "test-database-updated"} uri = f"api/v1/database/{test_database.id}" rv = self.client.put(uri, json=database_data) self.assertEqual(rv.status_code, 200) # Cleanup model = db.session.query(Database).get(test_database.id) db.session.delete(model) db.session.commit() def test_update_database_conn_fail(self): """ Database API: Test update fails connection """ example_db = get_example_database() if example_db.backend in ("sqlite", "hive", "presto"): return test_database = self.insert_database( "test-database1", example_db.sqlalchemy_uri_decrypted ) example_db.password = "wrong_password" database_data = { "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, } uri = f"api/v1/database/{test_database.id}" self.login(username="admin") rv = self.client.put(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = {"message": "Could not connect to database."} self.assertEqual(rv.status_code, 422) self.assertEqual(response, expected_response) # Cleanup model = db.session.query(Database).get(test_database.id) db.session.delete(model) db.session.commit() def test_update_database_uniqueness(self): """ Database API: Test update uniqueness """ example_db = get_example_database() test_database1 = self.insert_database( "test-database1", example_db.sqlalchemy_uri_decrypted ) test_database2 = self.insert_database( "test-database2", example_db.sqlalchemy_uri_decrypted ) self.login(username="admin") database_data = {"database_name": "test-database2"} uri = f"api/v1/database/{test_database1.id}" rv = self.client.put(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = { "message": {"database_name": "A database with the same name already exists"} } self.assertEqual(rv.status_code, 422) self.assertEqual(response, expected_response) # Cleanup db.session.delete(test_database1) db.session.delete(test_database2) db.session.commit() def test_update_database_invalid(self): """ Database API: Test update invalid request """ self.login(username="admin") database_data = {"database_name": "test-database-updated"} uri = "api/v1/database/invalid" rv = self.client.put(uri, json=database_data) self.assertEqual(rv.status_code, 404) def test_update_database_uri_validate(self): """ Database API: Test update sqlalchemy_uri validate """ example_db = get_example_database() test_database = self.insert_database( "test-database", example_db.sqlalchemy_uri_decrypted ) self.login(username="admin") database_data = { "database_name": 
"test-database-updated", "sqlalchemy_uri": "wrong_uri", } uri = f"api/v1/database/{test_database.id}" rv = self.client.put(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(rv.status_code, 400) self.assertIn( "Invalid connection string", response["message"]["sqlalchemy_uri"][0], ) db.session.delete(test_database) db.session.commit() def test_delete_database(self): """ Database API: Test delete """ database_id = self.insert_database("test-database", "test_uri").id self.login(username="admin") uri = f"api/v1/database/{database_id}" rv = self.delete_assert_metric(uri, "delete") self.assertEqual(rv.status_code, 200) model = db.session.query(Database).get(database_id) self.assertEqual(model, None) def test_delete_database_not_found(self): """ Database API: Test delete not found """ max_id = db.session.query(func.max(Database.id)).scalar() self.login(username="admin") uri = f"api/v1/database/{max_id + 1}" rv = self.delete_assert_metric(uri, "delete") self.assertEqual(rv.status_code, 404) def test_delete_database_with_datasets(self): """ Database API: Test delete fails because it has depending datasets """ database_id = ( db.session.query(Database).filter_by(database_name="examples").one() ).id self.login(username="admin") uri = f"api/v1/database/{database_id}" rv = self.delete_assert_metric(uri, "delete") self.assertEqual(rv.status_code, 422) @pytest.mark.usefixtures("create_database_with_report") def test_delete_database_with_report(self): """ Database API: Test delete with associated report """ self.login(username="admin") database = ( db.session.query(Database) .filter(Database.database_name == "database_with_report") .one_or_none() ) uri = f"api/v1/database/{database.id}" rv = self.client.delete(uri) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(rv.status_code, 422) expected_response = { "message": "There are associated alerts or reports: report_with_database" } self.assertEqual(response, expected_response) def test_get_table_metadata(self): """ Database API: Test get table metadata info """ example_db = get_example_database() self.login(username="admin") uri = f"api/v1/database/{example_db.id}/table/birth_names/null/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 200) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(response["name"], "birth_names") self.assertIsNone(response["comment"]) self.assertTrue(len(response["columns"]) > 5) self.assertTrue(response.get("selectStar").startswith("SELECT")) def test_info_security_database(self): """ Database API: Test info security """ self.login(username="admin") params = {"keys": ["permissions"]} uri = f"api/v1/database/_info?q={prison.dumps(params)}" rv = self.get_assert_metric(uri, "info") data = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert "can_read" in data["permissions"] assert "can_write" in data["permissions"] assert len(data["permissions"]) == 2 def test_get_invalid_database_table_metadata(self): """ Database API: Test get invalid database from table metadata """ database_id = 1000 self.login(username="admin") uri = f"api/v1/database/{database_id}/table/some_table/some_schema/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) uri = "api/v1/database/some_database/table/some_table/some_schema/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) def test_get_invalid_table_table_metadata(self): """ Database API: Test get invalid table from table metadata """ example_db = get_example_database() uri = 
f"api/v1/database/{example_db.id}/wrong_table/null/" self.login(username="admin") rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) def test_get_table_metadata_no_db_permission(self): """ Database API: Test get table metadata from not permitted db """ self.login(username="gamma") example_db = get_example_database() uri = f"api/v1/database/{example_db.id}/birth_names/null/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) def test_get_select_star(self): """ Database API: Test get select star """ self.login(username="admin") example_db = get_example_database() uri = f"api/v1/database/{example_db.id}/select_star/birth_names/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 200) response = json.loads(rv.data.decode("utf-8")) self.assertIn("gender", response["result"]) def test_get_select_star_not_allowed(self): """ Database API: Test get select star not allowed """ self.login(username="gamma") example_db = get_example_database() uri = f"api/v1/database/{example_db.id}/select_star/birth_names/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) def test_get_select_star_datasource_access(self): """ Database API: Test get select star with datasource access """ session = db.session table = SqlaTable( schema="main", table_name="ab_permission", database=get_main_database() ) session.add(table) session.commit() tmp_table_perm = security_manager.find_permission_view_menu( "datasource_access", table.get_perm() ) gamma_role = security_manager.find_role("Gamma") security_manager.add_permission_role(gamma_role, tmp_table_perm) self.login(username="gamma") main_db = get_main_database() uri = f"api/v1/database/{main_db.id}/select_star/ab_permission/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 200) # rollback changes security_manager.del_permission_role(gamma_role, tmp_table_perm) db.session.delete(table) db.session.delete(main_db) db.session.commit() def test_get_select_star_not_found_database(self): """ Database API: Test get select star not found database """ self.login(username="admin") max_id = db.session.query(func.max(Database.id)).scalar() uri = f"api/v1/database/{max_id + 1}/select_star/birth_names/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) def test_get_select_star_not_found_table(self): """ Database API: Test get select star not found database """ self.login(username="admin") example_db = get_example_database() # sqllite will not raise a NoSuchTableError if example_db.backend == "sqlite": return uri = f"api/v1/database/{example_db.id}/select_star/table_does_not_exist/" rv = self.client.get(uri) # TODO(bkyryliuk): investigate why presto returns 500 self.assertEqual(rv.status_code, 404 if example_db.backend != "presto" else 500) def test_database_schemas(self): """ Database API: Test database schemas """ self.login("admin") database = db.session.query(Database).first() schemas = database.get_all_schema_names() rv = self.client.get(f"api/v1/database/{database.id}/schemas/") response = json.loads(rv.data.decode("utf-8")) self.assertEqual(schemas, response["result"]) rv = self.client.get( f"api/v1/database/{database.id}/schemas/?q={prison.dumps({"force": True})}" ) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(schemas, response["result"]) def test_database_schemas_not_found(self): """ Database API: Test database schemas not found """ self.logout() self.login(username="gamma") example_db = get_example_database() uri = f"api/v1/database/{example_db.id}/schemas/" rv = self.client.get(uri) 
self.assertEqual(rv.status_code, 404) def test_database_schemas_invalid_query(self): """ Database API: Test database schemas with invalid query """ self.login("admin") database = db.session.query(Database).first() rv = self.client.get( f"api/v1/database/{database.id}/schemas/?q={prison.dumps({"force": "nop"})}" ) self.assertEqual(rv.status_code, 400) def test_test_connection(self): """ Database API: Test test connection """ extra = { "metadata_params": {}, "engine_params": {}, "metadata_cache_timeout": {}, "schemas_allowed_for_csv_upload": [], } # need to temporarily allow sqlite dbs, teardown will undo this app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = False self.login("admin") example_db = get_example_database() # validate that the endpoint works with the password-masked sqlalchemy uri data = { "database_name": "examples", "encrypted_extra": "{}", "extra": json.dumps(extra), "impersonate_user": False, "sqlalchemy_uri": example_db.safe_sqlalchemy_uri(), "server_cert": ssl_certificate, } url = "api/v1/database/test_connection" rv = self.post_assert_metric(url, data, "test_connection") self.assertEqual(rv.status_code, 200) self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8") # validate that the endpoint works with the decrypted sqlalchemy uri data = { "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, "database_name": "examples", "impersonate_user": False, "extra": json.dumps(extra), "server_cert": None, } rv = self.post_assert_metric(url, data, "test_connection") self.assertEqual(rv.status_code, 200) self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8") def test_test_connection_failed(self): """ Database API: Test test connection failed """ self.login("admin") data = { "sqlalchemy_uri": "broken://url", "database_name": "examples", "impersonate_user": False, "server_cert": None, } url = "api/v1/database/test_connection" rv = self.post_assert_metric(url, data, "test_connection") self.assertEqual(rv.status_code, 400) self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8") response = json.loads(rv.data.decode("utf-8")) expected_response = { "driver_name": "broken", "message": "Could not load database driver: broken", } self.assertEqual(response, expected_response) data = { "sqlalchemy_uri": "mssql+pymssql://url", "database_name": "examples", "impersonate_user": False, "server_cert": None, } rv = self.post_assert_metric(url, data, "test_connection") self.assertEqual(rv.status_code, 400) self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8") response = json.loads(rv.data.decode("utf-8")) expected_response = { "driver_name": "mssql+pymssql", "message": "Could not load database driver: mssql+pymssql", } self.assertEqual(response, expected_response) def test_test_connection_unsafe_uri(self): """ Database API: Test test connection with unsafe uri """ self.login("admin") app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = True data = { "sqlalchemy_uri": "sqlite:///home/superset/unsafe.db", "database_name": "unsafe", "impersonate_user": False, "server_cert": None, } url = "api/v1/database/test_connection" rv = self.post_assert_metric(url, data, "test_connection") self.assertEqual(rv.status_code, 400) response = json.loads(rv.data.decode("utf-8")) expected_response = { "message": { "sqlalchemy_uri": [ "SQLite database cannot be used as a data source for security reasons." 
] } } self.assertEqual(response, expected_response) app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = False @pytest.mark.usefixtures( "load_unicode_dashboard_with_position", "load_energy_table_with_slice" ) def test_get_database_related_objects(self): """ Database API: Test get chart and dashboard count related to a database """ self.login(username="admin") database = get_example_database() uri = f"api/v1/database/{database.id}/related_objects/" rv = self.get_assert_metric(uri, "related_objects") self.assertEqual(rv.status_code, 200) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(response["charts"]["count"], 33) self.assertEqual(response["dashboards"]["count"], 3) def test_get_database_related_objects_not_found(self): """ Database API: Test related objects not found """ max_id = db.session.query(func.max(Database.id)).scalar() # id does not exist and we get 404 invalid_id = max_id + 1 uri = f"api/v1/database/{invalid_id}/related_objects/" self.login(username="admin") rv = self.get_assert_metric(uri, "related_objects") self.assertEqual(rv.status_code, 404) self.logout() self.login(username="gamma") database = get_example_database() uri = f"api/v1/database/{database.id}/related_objects/" rv = self.get_assert_metric(uri, "related_objects") self.assertEqual(rv.status_code, 404) def test_export_database(self): """ Database API: Test export database """ self.login(username="admin") database = get_example_database() argument = [database.id] uri = f"api/v1/database/export/?q={prison.dumps(argument)}" rv = self.get_assert_metric(uri, "export") assert rv.status_code == 200 buf = BytesIO(rv.data) assert is_zipfile(buf) def test_export_database_not_allowed(self): """ Database API: Test export database not allowed """ self.login(username="gamma") database = get_example_database() argument = [database.id] uri = f"api/v1/database/export/?q={prison.dumps(argument)}" rv = self.client.get(uri) # export only requires can_read now, but gamma needs to have explicit access to # view the database assert rv.status_code == 404 def test_export_database_non_existing(self): """ Database API: Test export non-existing database """ max_id = db.session.query(func.max(Database.id)).scalar() # id does not exist and we get 404 invalid_id = max_id + 1 self.login(username="admin") argument = [invalid_id] uri = f"api/v1/database/export/?q={prison.dumps(argument)}" rv = self.get_assert_metric(uri, "export") assert rv.status_code == 404 def test_import_database(self): """ Database API: Test import database """ self.login(username="admin") uri = "api/v1/database/import/" buf = self.create_database_import() form_data = { "formData": (buf, "database_export.zip"), } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert response == {"message": "OK"} database = ( db.session.query(Database).filter_by(uuid=database_config["uuid"]).one() ) assert database.database_name == "imported_database" assert len(database.tables) == 1 dataset = database.tables[0] assert dataset.table_name == "imported_dataset" assert str(dataset.uuid) == dataset_config["uuid"] db.session.delete(dataset) db.session.delete(database) db.session.commit() def test_import_database_overwrite(self): """ Database API: Test import existing database """ self.login(username="admin") uri = "api/v1/database/import/" buf = self.create_database_import() form_data = { "formData": (buf, "database_export.zip"), } rv = self.client.post(uri, data=form_data, 
content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert response == {"message": "OK"} # import again without overwrite flag buf = self.create_database_import() form_data = { "formData": (buf, "database_export.zip"), } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 422 assert response == { "message": { "databases/imported_database.yaml": "Database already exists and `overwrite=true` was not passed" } } # import with overwrite flag buf = self.create_database_import() form_data = { "formData": (buf, "database_export.zip"), "overwrite": "true", } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert response == {"message": "OK"} # clean up database = ( db.session.query(Database).filter_by(uuid=database_config["uuid"]).one() ) dataset = database.tables[0] db.session.delete(dataset) db.session.delete(database) db.session.commit() def test_import_database_invalid(self): """ Database API: Test import invalid database """ self.login(username="admin") uri = "api/v1/database/import/" buf = BytesIO() with ZipFile(buf, "w") as bundle: with bundle.open("database_export/metadata.yaml", "w") as fp: fp.write(yaml.safe_dump(dataset_metadata_config).encode()) with bundle.open( "database_export/databases/imported_database.yaml", "w" ) as fp: fp.write(yaml.safe_dump(database_config).encode()) with bundle.open( "database_export/datasets/imported_dataset.yaml", "w" ) as fp: fp.write(yaml.safe_dump(dataset_config).encode()) buf.seek(0) form_data = { "formData": (buf, "database_export.zip"), } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 422 assert response == { "message": {"metadata.yaml": {"type": ["Must be equal to Database."]}} } def test_import_database_masked_password(self): """ Database API: Test import database with masked password """ self.login(username="admin") uri = "api/v1/database/import/" masked_database_config = database_config.copy() masked_database_config[ "sqlalchemy_uri" ] = "postgresql://username:XXXXXXXXXX@host:12345/db" buf = BytesIO() with ZipFile(buf, "w") as bundle: with bundle.open("database_export/metadata.yaml", "w") as fp: fp.write(yaml.safe_dump(database_metadata_config).encode()) with bundle.open( "database_export/databases/imported_database.yaml", "w" ) as fp: fp.write(yaml.safe_dump(masked_database_config).encode()) with bundle.open( "database_export/datasets/imported_dataset.yaml", "w" ) as fp: fp.write(yaml.safe_dump(dataset_config).encode()) buf.seek(0) form_data = { "formData": (buf, "database_export.zip"), } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 422 assert response == { "message": { "databases/imported_database.yaml": { "_schema": ["Must provide a password for the database"] } } } def test_import_database_masked_password_provided(self): """ Database API: Test import database with masked password provided """ self.login(username="admin") uri = "api/v1/database/import/" masked_database_config = database_config.copy() masked_database_config[ "sqlalchemy_uri" ] = "postgresql://username:XXXXXXXXXX@host:12345/db" buf = BytesIO() with ZipFile(buf, "w") as bundle: with 
bundle.open("database_export/metadata.yaml", "w") as fp: fp.write(yaml.safe_dump(database_metadata_config).encode()) with bundle.open( "database_export/databases/imported_database.yaml", "w" ) as fp: fp.write(yaml.safe_dump(masked_database_config).encode()) buf.seek(0) form_data = { "formData": (buf, "database_export.zip"), "passwords": json.dumps({"databases/imported_database.yaml": "SECRET"}), } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert response == {"message": "OK"} database = ( db.session.query(Database).filter_by(uuid=database_config["uuid"]).one() ) assert database.database_name == "imported_database" assert ( database.sqlalchemy_uri == "postgresql://username:XXXXXXXXXX@host:12345/db" ) assert database.password == "SECRET" db.session.delete(database) db.session.commit()
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # isort:skip_file # pylint: disable=invalid-name, no-self-use, too-many-public-methods, too-many-arguments """Unit tests for Superset""" import json from io import BytesIO from unittest import mock from zipfile import is_zipfile, ZipFile import prison import pytest import yaml from sqlalchemy.sql import func from superset import db, security_manager from superset.connectors.sqla.models import SqlaTable from superset.models.core import Database from superset.models.reports import ReportSchedule, ReportScheduleType from superset.utils.core import get_example_database, get_main_database from tests.base_tests import SupersetTestCase from tests.fixtures.certificates import ssl_certificate from tests.fixtures.energy_dashboard import load_energy_table_with_slice from tests.fixtures.importexport import ( database_config, dataset_config, database_metadata_config, dataset_metadata_config, ) from tests.fixtures.unicode_dashboard import load_unicode_dashboard_with_position from tests.test_app import app class TestDatabaseApi(SupersetTestCase): def insert_database( self, database_name: str, sqlalchemy_uri: str, extra: str = "", encrypted_extra: str = "", server_cert: str = "", expose_in_sqllab: bool = False, ) -> Database: database = Database( database_name=database_name, sqlalchemy_uri=sqlalchemy_uri, extra=extra, encrypted_extra=encrypted_extra, server_cert=server_cert, expose_in_sqllab=expose_in_sqllab, ) db.session.add(database) db.session.commit() return database @pytest.fixture() def create_database_with_report(self): with self.create_app().app_context(): example_db = get_example_database() database = self.insert_database( "database_with_report", example_db.sqlalchemy_uri_decrypted, expose_in_sqllab=True, ) report_schedule = ReportSchedule( type=ReportScheduleType.ALERT, name="report_with_database", crontab="* * * * *", database=database, ) db.session.add(report_schedule) db.session.commit() yield database # rollback changes db.session.delete(report_schedule) db.session.delete(database) db.session.commit() def create_database_import(self): buf = BytesIO() with ZipFile(buf, "w") as bundle: with bundle.open("database_export/metadata.yaml", "w") as fp: fp.write(yaml.safe_dump(database_metadata_config).encode()) with bundle.open( "database_export/databases/imported_database.yaml", "w" ) as fp: fp.write(yaml.safe_dump(database_config).encode()) with bundle.open( "database_export/datasets/imported_dataset.yaml", "w" ) as fp: fp.write(yaml.safe_dump(dataset_config).encode()) buf.seek(0) return buf def test_get_items(self): """ Database API: Test get items """ self.login(username="admin") uri = "api/v1/database/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 200) response = json.loads(rv.data.decode("utf-8")) expected_columns = 
[ "allow_csv_upload", "allow_ctas", "allow_cvas", "allow_dml", "allow_multi_schema_metadata_fetch", "allow_run_async", "allows_cost_estimate", "allows_subquery", "allows_virtual_table_explore", "backend", "changed_on", "changed_on_delta_humanized", "created_by", "database_name", "explore_database_id", "expose_in_sqllab", "force_ctas_schema", "function_names", "id", ] self.assertGreater(response["count"], 0) self.assertEqual(list(response["result"][0].keys()), expected_columns) def test_get_items_filter(self): """ Database API: Test get items with filter """ example_db = get_example_database() test_database = self.insert_database( "test-database", example_db.sqlalchemy_uri_decrypted, expose_in_sqllab=True ) dbs = db.session.query(Database).filter_by(expose_in_sqllab=True).all() self.login(username="admin") arguments = { "keys": ["none"], "filters": [{"col": "expose_in_sqllab", "opr": "eq", "value": True}], "order_columns": "database_name", "order_direction": "asc", "page": 0, "page_size": -1, } uri = f"api/v1/database/?q={prison.dumps(arguments)}" rv = self.client.get(uri) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(rv.status_code, 200) self.assertEqual(response["count"], len(dbs)) # Cleanup db.session.delete(test_database) db.session.commit() def test_get_items_not_allowed(self): """ Database API: Test get items not allowed """ self.login(username="gamma") uri = "api/v1/database/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 200) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(response["count"], 0) def test_create_database(self): """ Database API: Test create """ extra = { "metadata_params": {}, "engine_params": {}, "metadata_cache_timeout": {}, "schemas_allowed_for_csv_upload": [], } self.login(username="admin") example_db = get_example_database() if example_db.backend == "sqlite": return database_data = { "database_name": "test-create-database", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, "server_cert": ssl_certificate, "extra": json.dumps(extra), } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(rv.status_code, 201) # Cleanup model = db.session.query(Database).get(response.get("id")) db.session.delete(model) db.session.commit() def test_create_database_server_cert_validate(self): """ Database API: Test create server cert validation """ example_db = get_example_database() if example_db.backend == "sqlite": return self.login(username="admin") database_data = { "database_name": "test-create-database-invalid-cert", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, "server_cert": "INVALID CERT", } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = {"message": {"server_cert": ["Invalid certificate"]}} self.assertEqual(rv.status_code, 400) self.assertEqual(response, expected_response) def test_create_database_json_validate(self): """ Database API: Test create encrypted extra and extra validation """ example_db = get_example_database() if example_db.backend == "sqlite": return self.login(username="admin") database_data = { "database_name": "test-create-database-invalid-json", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, "encrypted_extra": '{"A": "a", "B", "C"}', "extra": '["A": "a", "B", "C"]', } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = { "message": { 
"encrypted_extra": [ "Field cannot be decoded by JSON. Expecting ':' " "delimiter: line 1 column 15 (char 14)" ], "extra": [ "Field cannot be decoded by JSON. Expecting ','" " delimiter: line 1 column 5 (char 4)" ], } } self.assertEqual(rv.status_code, 400) self.assertEqual(response, expected_response) def test_create_database_extra_metadata_validate(self): """ Database API: Test create extra metadata_params validation """ example_db = get_example_database() if example_db.backend == "sqlite": return extra = { "metadata_params": {"wrong_param": "some_value"}, "engine_params": {}, "metadata_cache_timeout": {}, "schemas_allowed_for_csv_upload": [], } self.login(username="admin") database_data = { "database_name": "test-create-database-invalid-extra", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, "extra": json.dumps(extra), } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = { "message": { "extra": [ "The metadata_params in Extra field is not configured correctly." " The key wrong_param is invalid." ] } } self.assertEqual(rv.status_code, 400) self.assertEqual(response, expected_response) def test_create_database_unique_validate(self): """ Database API: Test create database_name already exists """ example_db = get_example_database() if example_db.backend == "sqlite": return self.login(username="admin") database_data = { "database_name": "examples", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = { "message": {"database_name": "A database with the same name already exists"} } self.assertEqual(rv.status_code, 422) self.assertEqual(response, expected_response) def test_create_database_uri_validate(self): """ Database API: Test create fail validate sqlalchemy uri """ self.login(username="admin") database_data = { "database_name": "test-database-invalid-uri", "sqlalchemy_uri": "wrong_uri", } uri = "api/v1/database/" rv = self.client.post(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(rv.status_code, 400) self.assertIn( "Invalid connection string", response["message"]["sqlalchemy_uri"][0], ) @mock.patch( "superset.views.core.app.config", {**app.config, "PREVENT_UNSAFE_DB_CONNECTIONS": True}, ) def test_create_database_fail_sqllite(self): """ Database API: Test create fail with sqllite """ database_data = { "database_name": "test-create-sqlite-database", "sqlalchemy_uri": "sqlite:////some.db", } uri = "api/v1/database/" self.login(username="admin") response = self.client.post(uri, json=database_data) response_data = json.loads(response.data.decode("utf-8")) expected_response = { "message": { "sqlalchemy_uri": [ "SQLite database cannot be used as a data source " "for security reasons." 
] } } self.assertEqual(response_data, expected_response) self.assertEqual(response.status_code, 400) def test_create_database_conn_fail(self): """ Database API: Test create fails connection """ example_db = get_example_database() if example_db.backend in ("sqlite", "hive", "presto"): return example_db.password = "wrong_password" database_data = { "database_name": "test-create-database-wrong-password", "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, } uri = "api/v1/database/" self.login(username="admin") response = self.client.post(uri, json=database_data) response_data = json.loads(response.data.decode("utf-8")) expected_response = {"message": "Could not connect to database."} self.assertEqual(response.status_code, 422) self.assertEqual(response_data, expected_response) def test_update_database(self): """ Database API: Test update """ example_db = get_example_database() test_database = self.insert_database( "test-database", example_db.sqlalchemy_uri_decrypted ) self.login(username="admin") database_data = {"database_name": "test-database-updated"} uri = f"api/v1/database/{test_database.id}" rv = self.client.put(uri, json=database_data) self.assertEqual(rv.status_code, 200) # Cleanup model = db.session.query(Database).get(test_database.id) db.session.delete(model) db.session.commit() def test_update_database_conn_fail(self): """ Database API: Test update fails connection """ example_db = get_example_database() if example_db.backend in ("sqlite", "hive", "presto"): return test_database = self.insert_database( "test-database1", example_db.sqlalchemy_uri_decrypted ) example_db.password = "wrong_password" database_data = { "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, } uri = f"api/v1/database/{test_database.id}" self.login(username="admin") rv = self.client.put(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = {"message": "Could not connect to database."} self.assertEqual(rv.status_code, 422) self.assertEqual(response, expected_response) # Cleanup model = db.session.query(Database).get(test_database.id) db.session.delete(model) db.session.commit() def test_update_database_uniqueness(self): """ Database API: Test update uniqueness """ example_db = get_example_database() test_database1 = self.insert_database( "test-database1", example_db.sqlalchemy_uri_decrypted ) test_database2 = self.insert_database( "test-database2", example_db.sqlalchemy_uri_decrypted ) self.login(username="admin") database_data = {"database_name": "test-database2"} uri = f"api/v1/database/{test_database1.id}" rv = self.client.put(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) expected_response = { "message": {"database_name": "A database with the same name already exists"} } self.assertEqual(rv.status_code, 422) self.assertEqual(response, expected_response) # Cleanup db.session.delete(test_database1) db.session.delete(test_database2) db.session.commit() def test_update_database_invalid(self): """ Database API: Test update invalid request """ self.login(username="admin") database_data = {"database_name": "test-database-updated"} uri = "api/v1/database/invalid" rv = self.client.put(uri, json=database_data) self.assertEqual(rv.status_code, 404) def test_update_database_uri_validate(self): """ Database API: Test update sqlalchemy_uri validate """ example_db = get_example_database() test_database = self.insert_database( "test-database", example_db.sqlalchemy_uri_decrypted ) self.login(username="admin") database_data = { "database_name": 
"test-database-updated", "sqlalchemy_uri": "wrong_uri", } uri = f"api/v1/database/{test_database.id}" rv = self.client.put(uri, json=database_data) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(rv.status_code, 400) self.assertIn( "Invalid connection string", response["message"]["sqlalchemy_uri"][0], ) db.session.delete(test_database) db.session.commit() def test_delete_database(self): """ Database API: Test delete """ database_id = self.insert_database("test-database", "test_uri").id self.login(username="admin") uri = f"api/v1/database/{database_id}" rv = self.delete_assert_metric(uri, "delete") self.assertEqual(rv.status_code, 200) model = db.session.query(Database).get(database_id) self.assertEqual(model, None) def test_delete_database_not_found(self): """ Database API: Test delete not found """ max_id = db.session.query(func.max(Database.id)).scalar() self.login(username="admin") uri = f"api/v1/database/{max_id + 1}" rv = self.delete_assert_metric(uri, "delete") self.assertEqual(rv.status_code, 404) def test_delete_database_with_datasets(self): """ Database API: Test delete fails because it has depending datasets """ database_id = ( db.session.query(Database).filter_by(database_name="examples").one() ).id self.login(username="admin") uri = f"api/v1/database/{database_id}" rv = self.delete_assert_metric(uri, "delete") self.assertEqual(rv.status_code, 422) @pytest.mark.usefixtures("create_database_with_report") def test_delete_database_with_report(self): """ Database API: Test delete with associated report """ self.login(username="admin") database = ( db.session.query(Database) .filter(Database.database_name == "database_with_report") .one_or_none() ) uri = f"api/v1/database/{database.id}" rv = self.client.delete(uri) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(rv.status_code, 422) expected_response = { "message": "There are associated alerts or reports: report_with_database" } self.assertEqual(response, expected_response) def test_get_table_metadata(self): """ Database API: Test get table metadata info """ example_db = get_example_database() self.login(username="admin") uri = f"api/v1/database/{example_db.id}/table/birth_names/null/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 200) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(response["name"], "birth_names") self.assertIsNone(response["comment"]) self.assertTrue(len(response["columns"]) > 5) self.assertTrue(response.get("selectStar").startswith("SELECT")) def test_info_security_database(self): """ Database API: Test info security """ self.login(username="admin") params = {"keys": ["permissions"]} uri = f"api/v1/database/_info?q={prison.dumps(params)}" rv = self.get_assert_metric(uri, "info") data = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert "can_read" in data["permissions"] assert "can_write" in data["permissions"] assert len(data["permissions"]) == 2 def test_get_invalid_database_table_metadata(self): """ Database API: Test get invalid database from table metadata """ database_id = 1000 self.login(username="admin") uri = f"api/v1/database/{database_id}/table/some_table/some_schema/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) uri = "api/v1/database/some_database/table/some_table/some_schema/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) def test_get_invalid_table_table_metadata(self): """ Database API: Test get invalid table from table metadata """ example_db = get_example_database() uri = 
f"api/v1/database/{example_db.id}/wrong_table/null/" self.login(username="admin") rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) def test_get_table_metadata_no_db_permission(self): """ Database API: Test get table metadata from not permitted db """ self.login(username="gamma") example_db = get_example_database() uri = f"api/v1/database/{example_db.id}/birth_names/null/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) def test_get_select_star(self): """ Database API: Test get select star """ self.login(username="admin") example_db = get_example_database() uri = f"api/v1/database/{example_db.id}/select_star/birth_names/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 200) response = json.loads(rv.data.decode("utf-8")) self.assertIn("gender", response["result"]) def test_get_select_star_not_allowed(self): """ Database API: Test get select star not allowed """ self.login(username="gamma") example_db = get_example_database() uri = f"api/v1/database/{example_db.id}/select_star/birth_names/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) def test_get_select_star_datasource_access(self): """ Database API: Test get select star with datasource access """ session = db.session table = SqlaTable( schema="main", table_name="ab_permission", database=get_main_database() ) session.add(table) session.commit() tmp_table_perm = security_manager.find_permission_view_menu( "datasource_access", table.get_perm() ) gamma_role = security_manager.find_role("Gamma") security_manager.add_permission_role(gamma_role, tmp_table_perm) self.login(username="gamma") main_db = get_main_database() uri = f"api/v1/database/{main_db.id}/select_star/ab_permission/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 200) # rollback changes security_manager.del_permission_role(gamma_role, tmp_table_perm) db.session.delete(table) db.session.delete(main_db) db.session.commit() def test_get_select_star_not_found_database(self): """ Database API: Test get select star not found database """ self.login(username="admin") max_id = db.session.query(func.max(Database.id)).scalar() uri = f"api/v1/database/{max_id + 1}/select_star/birth_names/" rv = self.client.get(uri) self.assertEqual(rv.status_code, 404) def test_get_select_star_not_found_table(self): """ Database API: Test get select star not found database """ self.login(username="admin") example_db = get_example_database() # sqllite will not raise a NoSuchTableError if example_db.backend == "sqlite": return uri = f"api/v1/database/{example_db.id}/select_star/table_does_not_exist/" rv = self.client.get(uri) # TODO(bkyryliuk): investigate why presto returns 500 self.assertEqual(rv.status_code, 404 if example_db.backend != "presto" else 500) def test_database_schemas(self): """ Database API: Test database schemas """ self.login("admin") database = db.session.query(Database).first() schemas = database.get_all_schema_names() rv = self.client.get(f"api/v1/database/{database.id}/schemas/") response = json.loads(rv.data.decode("utf-8")) self.assertEqual(schemas, response["result"]) rv = self.client.get( f"api/v1/database/{database.id}/schemas/?q={prison.dumps({'force': True})}" ) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(schemas, response["result"]) def test_database_schemas_not_found(self): """ Database API: Test database schemas not found """ self.logout() self.login(username="gamma") example_db = get_example_database() uri = f"api/v1/database/{example_db.id}/schemas/" rv = self.client.get(uri) 
self.assertEqual(rv.status_code, 404) def test_database_schemas_invalid_query(self): """ Database API: Test database schemas with invalid query """ self.login("admin") database = db.session.query(Database).first() rv = self.client.get( f"api/v1/database/{database.id}/schemas/?q={prison.dumps({'force': 'nop'})}" ) self.assertEqual(rv.status_code, 400) def test_test_connection(self): """ Database API: Test test connection """ extra = { "metadata_params": {}, "engine_params": {}, "metadata_cache_timeout": {}, "schemas_allowed_for_csv_upload": [], } # need to temporarily allow sqlite dbs, teardown will undo this app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = False self.login("admin") example_db = get_example_database() # validate that the endpoint works with the password-masked sqlalchemy uri data = { "database_name": "examples", "encrypted_extra": "{}", "extra": json.dumps(extra), "impersonate_user": False, "sqlalchemy_uri": example_db.safe_sqlalchemy_uri(), "server_cert": ssl_certificate, } url = "api/v1/database/test_connection" rv = self.post_assert_metric(url, data, "test_connection") self.assertEqual(rv.status_code, 200) self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8") # validate that the endpoint works with the decrypted sqlalchemy uri data = { "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted, "database_name": "examples", "impersonate_user": False, "extra": json.dumps(extra), "server_cert": None, } rv = self.post_assert_metric(url, data, "test_connection") self.assertEqual(rv.status_code, 200) self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8") def test_test_connection_failed(self): """ Database API: Test test connection failed """ self.login("admin") data = { "sqlalchemy_uri": "broken://url", "database_name": "examples", "impersonate_user": False, "server_cert": None, } url = "api/v1/database/test_connection" rv = self.post_assert_metric(url, data, "test_connection") self.assertEqual(rv.status_code, 400) self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8") response = json.loads(rv.data.decode("utf-8")) expected_response = { "driver_name": "broken", "message": "Could not load database driver: broken", } self.assertEqual(response, expected_response) data = { "sqlalchemy_uri": "mssql+pymssql://url", "database_name": "examples", "impersonate_user": False, "server_cert": None, } rv = self.post_assert_metric(url, data, "test_connection") self.assertEqual(rv.status_code, 400) self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8") response = json.loads(rv.data.decode("utf-8")) expected_response = { "driver_name": "mssql+pymssql", "message": "Could not load database driver: mssql+pymssql", } self.assertEqual(response, expected_response) def test_test_connection_unsafe_uri(self): """ Database API: Test test connection with unsafe uri """ self.login("admin") app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = True data = { "sqlalchemy_uri": "sqlite:///home/superset/unsafe.db", "database_name": "unsafe", "impersonate_user": False, "server_cert": None, } url = "api/v1/database/test_connection" rv = self.post_assert_metric(url, data, "test_connection") self.assertEqual(rv.status_code, 400) response = json.loads(rv.data.decode("utf-8")) expected_response = { "message": { "sqlalchemy_uri": [ "SQLite database cannot be used as a data source for security reasons." 
] } } self.assertEqual(response, expected_response) app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = False @pytest.mark.usefixtures( "load_unicode_dashboard_with_position", "load_energy_table_with_slice" ) def test_get_database_related_objects(self): """ Database API: Test get chart and dashboard count related to a database """ self.login(username="admin") database = get_example_database() uri = f"api/v1/database/{database.id}/related_objects/" rv = self.get_assert_metric(uri, "related_objects") self.assertEqual(rv.status_code, 200) response = json.loads(rv.data.decode("utf-8")) self.assertEqual(response["charts"]["count"], 33) self.assertEqual(response["dashboards"]["count"], 3) def test_get_database_related_objects_not_found(self): """ Database API: Test related objects not found """ max_id = db.session.query(func.max(Database.id)).scalar() # id does not exist and we get 404 invalid_id = max_id + 1 uri = f"api/v1/database/{invalid_id}/related_objects/" self.login(username="admin") rv = self.get_assert_metric(uri, "related_objects") self.assertEqual(rv.status_code, 404) self.logout() self.login(username="gamma") database = get_example_database() uri = f"api/v1/database/{database.id}/related_objects/" rv = self.get_assert_metric(uri, "related_objects") self.assertEqual(rv.status_code, 404) def test_export_database(self): """ Database API: Test export database """ self.login(username="admin") database = get_example_database() argument = [database.id] uri = f"api/v1/database/export/?q={prison.dumps(argument)}" rv = self.get_assert_metric(uri, "export") assert rv.status_code == 200 buf = BytesIO(rv.data) assert is_zipfile(buf) def test_export_database_not_allowed(self): """ Database API: Test export database not allowed """ self.login(username="gamma") database = get_example_database() argument = [database.id] uri = f"api/v1/database/export/?q={prison.dumps(argument)}" rv = self.client.get(uri) # export only requires can_read now, but gamma needs to have explicit access to # view the database assert rv.status_code == 404 def test_export_database_non_existing(self): """ Database API: Test export non-existing database """ max_id = db.session.query(func.max(Database.id)).scalar() # id does not exist and we get 404 invalid_id = max_id + 1 self.login(username="admin") argument = [invalid_id] uri = f"api/v1/database/export/?q={prison.dumps(argument)}" rv = self.get_assert_metric(uri, "export") assert rv.status_code == 404 def test_import_database(self): """ Database API: Test import database """ self.login(username="admin") uri = "api/v1/database/import/" buf = self.create_database_import() form_data = { "formData": (buf, "database_export.zip"), } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert response == {"message": "OK"} database = ( db.session.query(Database).filter_by(uuid=database_config["uuid"]).one() ) assert database.database_name == "imported_database" assert len(database.tables) == 1 dataset = database.tables[0] assert dataset.table_name == "imported_dataset" assert str(dataset.uuid) == dataset_config["uuid"] db.session.delete(dataset) db.session.delete(database) db.session.commit() def test_import_database_overwrite(self): """ Database API: Test import existing database """ self.login(username="admin") uri = "api/v1/database/import/" buf = self.create_database_import() form_data = { "formData": (buf, "database_export.zip"), } rv = self.client.post(uri, data=form_data, 
content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert response == {"message": "OK"} # import again without overwrite flag buf = self.create_database_import() form_data = { "formData": (buf, "database_export.zip"), } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 422 assert response == { "message": { "databases/imported_database.yaml": "Database already exists and `overwrite=true` was not passed" } } # import with overwrite flag buf = self.create_database_import() form_data = { "formData": (buf, "database_export.zip"), "overwrite": "true", } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert response == {"message": "OK"} # clean up database = ( db.session.query(Database).filter_by(uuid=database_config["uuid"]).one() ) dataset = database.tables[0] db.session.delete(dataset) db.session.delete(database) db.session.commit() def test_import_database_invalid(self): """ Database API: Test import invalid database """ self.login(username="admin") uri = "api/v1/database/import/" buf = BytesIO() with ZipFile(buf, "w") as bundle: with bundle.open("database_export/metadata.yaml", "w") as fp: fp.write(yaml.safe_dump(dataset_metadata_config).encode()) with bundle.open( "database_export/databases/imported_database.yaml", "w" ) as fp: fp.write(yaml.safe_dump(database_config).encode()) with bundle.open( "database_export/datasets/imported_dataset.yaml", "w" ) as fp: fp.write(yaml.safe_dump(dataset_config).encode()) buf.seek(0) form_data = { "formData": (buf, "database_export.zip"), } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 422 assert response == { "message": {"metadata.yaml": {"type": ["Must be equal to Database."]}} } def test_import_database_masked_password(self): """ Database API: Test import database with masked password """ self.login(username="admin") uri = "api/v1/database/import/" masked_database_config = database_config.copy() masked_database_config[ "sqlalchemy_uri" ] = "postgresql://username:XXXXXXXXXX@host:12345/db" buf = BytesIO() with ZipFile(buf, "w") as bundle: with bundle.open("database_export/metadata.yaml", "w") as fp: fp.write(yaml.safe_dump(database_metadata_config).encode()) with bundle.open( "database_export/databases/imported_database.yaml", "w" ) as fp: fp.write(yaml.safe_dump(masked_database_config).encode()) with bundle.open( "database_export/datasets/imported_dataset.yaml", "w" ) as fp: fp.write(yaml.safe_dump(dataset_config).encode()) buf.seek(0) form_data = { "formData": (buf, "database_export.zip"), } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 422 assert response == { "message": { "databases/imported_database.yaml": { "_schema": ["Must provide a password for the database"] } } } def test_import_database_masked_password_provided(self): """ Database API: Test import database with masked password provided """ self.login(username="admin") uri = "api/v1/database/import/" masked_database_config = database_config.copy() masked_database_config[ "sqlalchemy_uri" ] = "postgresql://username:XXXXXXXXXX@host:12345/db" buf = BytesIO() with ZipFile(buf, "w") as bundle: with 
bundle.open("database_export/metadata.yaml", "w") as fp: fp.write(yaml.safe_dump(database_metadata_config).encode()) with bundle.open( "database_export/databases/imported_database.yaml", "w" ) as fp: fp.write(yaml.safe_dump(masked_database_config).encode()) buf.seek(0) form_data = { "formData": (buf, "database_export.zip"), "passwords": json.dumps({"databases/imported_database.yaml": "SECRET"}), } rv = self.client.post(uri, data=form_data, content_type="multipart/form-data") response = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 200 assert response == {"message": "OK"} database = ( db.session.query(Database).filter_by(uuid=database_config["uuid"]).one() ) assert database.database_name == "imported_database" assert ( database.sqlalchemy_uri == "postgresql://username:XXXXXXXXXX@host:12345/db" ) assert database.password == "SECRET" db.session.delete(database) db.session.commit()
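# ---------------------------------------------------------------------------
# A minimal sketch of the `create_database_import` helper that the import
# tests above rely on. This is an assumption: the real fixture is defined
# elsewhere in the test suite; the bundle layout below simply mirrors the
# inline ZipFile construction used in `test_import_database_invalid`.
def create_database_import_sketch() -> BytesIO:
    buf = BytesIO()
    with ZipFile(buf, "w") as bundle:
        # `metadata.yaml` identifies the bundle type and version
        with bundle.open("database_export/metadata.yaml", "w") as fp:
            fp.write(yaml.safe_dump(database_metadata_config).encode())
        # one YAML file per exported database ...
        with bundle.open(
            "database_export/databases/imported_database.yaml", "w"
        ) as fp:
            fp.write(yaml.safe_dump(database_config).encode())
        # ... and one per dataset attached to it
        with bundle.open(
            "database_export/datasets/imported_dataset.yaml", "w"
        ) as fp:
            fp.write(yaml.safe_dump(dataset_config).encode())
    buf.seek(0)
    return buf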
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF general model utils.""" import functools import inspect import os import re import warnings from typing import Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from tensorflow.python.keras import backend as K from tensorflow.python.keras.saving import hdf5_format from .configuration_utils import PretrainedConfig from .file_utils import ( DUMMY_INPUTS, TF2_WEIGHTS_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_path, copy_func, hf_bucket_url, is_offline_mode, is_remote_url, ) from .generation_tf_utils import TFGenerationMixin from .tokenization_utils_base import BatchEncoding from .utils import logging logger = logging.get_logger(__name__) tf_logger = tf.get_logger() TFModelInputType = Union[ List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], np.ndarray, tf.Tensor ] class TFModelUtilsMixin: """ A few utilities for :obj:`tf.keras.Model`, to be used as a mixin. """ def num_parameters(self, only_trainable: bool = False) -> int: """ Get the number of (optionally, trainable) parameters in the model. Args: only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return only the number of trainable parameters Returns: :obj:`int`: The number of parameters. """ if only_trainable: return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables)) else: return self.count_params() def keras_serializable(cls): """ Decorate a Keras Layer class to support Keras serialization. This is done by: 1. Adding a :obj:`transformers_config` dict to the Keras config dictionary in :obj:`get_config` (called by Keras at serialization time. 2. Wrapping :obj:`__init__` to accept that :obj:`transformers_config` dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer. 3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in :obj:`custom_objects` in the call to :obj:`tf.keras.models.load_model`. Args: cls (a :obj:`tf.keras.layers.Layers subclass`): Typically a :obj:`TF.MainLayer` class in this project, in general must accept a :obj:`config` argument to its initializer. Returns: The same class object, with modifications for Keras deserialization. 
""" initializer = cls.__init__ config_class = getattr(cls, "config_class", None) if config_class is None: raise AttributeError("Must set `config_class` to use @keras_serializable") @functools.wraps(initializer) def wrapped_init(self, *args, **kwargs): config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None) if isinstance(config, dict): config = config_class.from_dict(config) initializer(self, config, *args, **kwargs) elif isinstance(config, PretrainedConfig): if len(args) > 0: initializer(self, *args, **kwargs) else: initializer(self, config, *args, **kwargs) else: raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)") self._config = config self._kwargs = kwargs cls.__init__ = wrapped_init if not hasattr(cls, "get_config"): raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses") if hasattr(cls.get_config, "_is_default"): def get_config(self): cfg = super(cls, self).get_config() cfg["config"] = self._config.to_dict() cfg.update(self._kwargs) return cfg cls.get_config = get_config cls._keras_serializable = True if hasattr(tf.keras.utils, "register_keras_serializable"): cls = tf.keras.utils.register_keras_serializable()(cls) return cls class TFCausalLanguageModelingLoss: """ Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 affect the loss active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) class TFQuestionAnsweringLoss: """ Loss function suitable for question answering. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) start_loss = loss_fn(labels["start_position"], logits[0]) end_loss = loss_fn(labels["end_position"], logits[1]) return (start_loss + end_loss) / 2.0 class TFTokenClassificationLoss: """ Loss function suitable for token classification. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 # are taken into account as loss if tf.math.reduce_any(labels == -1): warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.") active_loss = tf.reshape(labels, (-1,)) != -1 else: active_loss = tf.reshape(labels, (-1,)) != -100 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) class TFSequenceClassificationLoss: """ Loss function suitable for sequence classification. 
""" def compute_loss(self, labels, logits): if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1: loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE) else: loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) return loss_fn(labels, logits) class TFMultipleChoiceLoss(TFSequenceClassificationLoss): """Loss function suitable for multiple choice tasks.""" class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss): """ Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ class TFNextSentencePredictionLoss: """ Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 # are taken into account as loss next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss) next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss) return loss_fn(next_sentence_label, next_sentence_reduced_logits) def booleans_processing(config, **kwargs): """ Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or graph) Args: config (:class:`~transformers.PretrainedConfig`): The config of the running model. **kwargs: The boolean parameters Returns: A dictionary with the proper values for each boolean """ final_booleans = {} if tf.executing_eagerly(): final_booleans["output_attentions"] = ( kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions ) final_booleans["output_hidden_states"] = ( kwargs["output_hidden_states"] if kwargs["output_hidden_states"] is not None else config.output_hidden_states ) final_booleans["return_dict"] = ( kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict ) if "use_cache" in kwargs: final_booleans["use_cache"] = kwargs["use_cache"] if kwargs["use_cache"] is not None else config.use_cache else: if ( kwargs["output_attentions"] is not None or kwargs["output_hidden_states"] is not None or ("use_cache" in kwargs and kwargs["use_cache"] is not None) ): tf_logger.warning( "The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model." "They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)." ) final_booleans["output_attentions"] = config.output_attentions final_booleans["output_hidden_states"] = config.output_hidden_states if kwargs["return_dict"] is not None: tf_logger.warning( "The parameter `return_dict` cannot be set in graph mode and will always be set to `True`." ) final_booleans["return_dict"] = True if "use_cache" in kwargs: final_booleans["use_cache"] = config.use_cache return final_booleans def input_processing(func, config, input_ids, **kwargs): """ Process the input of each TensorFlow model including the booleans. 
In case of a list of symbolic inputs, each input has to be named accordingly to the parameters name, i.e.
    `input_ids = tf.keras.Input(shape=(128,), dtype='int32', name="input_ids")` otherwise the order of the tensors
    will not be guaranteed during the training.

    Args:
        func (:obj:`callable`):
            The callable function of the TensorFlow model.
        config (:class:`~transformers.PretrainedConfig`):
            The config of the running model.
        **kwargs:
            The inputs of the model.

    Returns:
        A dictionary mapping each expected parameter name to its parsed value, including the processed booleans.
    """
    signature = dict(inspect.signature(func).parameters)
    signature.pop("kwargs", None)
    signature.pop("self", None)
    parameter_names = list(signature.keys())
    output = {}
    allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)

    if "inputs" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
            FutureWarning,
        )

        output["input_ids"] = kwargs["kwargs_call"].pop("inputs")

    if "decoder_cached_states" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
            FutureWarning,
        )
        output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")

    if len(kwargs["kwargs_call"]) > 0:
        raise ValueError(
            f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
        )

    kwargs.pop("kwargs_call")

    for k, v in kwargs.items():
        if isinstance(v, allowed_types) or v is None:
            output[k] = v
        else:
            raise ValueError(f"Data of type {type(v)} is not allowed; only {allowed_types} is accepted for {k}.")

    if isinstance(input_ids, (tuple, list)):
        for i, input in enumerate(input_ids):
            # EagerTensors don't allow to use the .name property so we check for a real Tensor
            if type(input) == tf.Tensor:
                # Tensor names have always the pattern `name:id` then we check only the
                # `name` part
                tensor_name = input.name.split(":")[0]

                if tensor_name in parameter_names:
                    output[tensor_name] = input
                else:
                    output[parameter_names[i]] = input
            elif isinstance(input, allowed_types) or input is None:
                output[parameter_names[i]] = input
            else:
                raise ValueError(
                    f"Data of type {type(input)} is not allowed; only {allowed_types} is accepted for {parameter_names[i]}."
                )
    elif isinstance(input_ids, (dict, BatchEncoding)):
        if "inputs" in input_ids:
            warnings.warn(
                "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
                FutureWarning,
            )

            output["input_ids"] = input_ids.pop("inputs")

        if "decoder_cached_states" in input_ids:
            warnings.warn(
                "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            output["past_key_values"] = input_ids.pop("decoder_cached_states")

        for k, v in dict(input_ids).items():
            if isinstance(v, allowed_types) or v is None:
                output[k] = v
            elif k not in parameter_names and "args" not in parameter_names:
                logger.warning(
                    f"The parameter {k} does not belong to the parameter list {parameter_names} and will be ignored."
                )
                continue
            else:
                raise ValueError(f"Data of type {type(v)} is not allowed; only {allowed_types} is accepted for {k}.")
    else:
        if isinstance(input_ids, tf.Tensor) or input_ids is None:
            output[parameter_names[0]] = input_ids
        else:
            raise ValueError(
                f"Data of type {type(input_ids)} is not allowed; only {allowed_types} is accepted for {parameter_names[0]}."
) for name in parameter_names: if name not in list(output.keys()) and name != "args": output[name] = kwargs.pop(name, signature[name].default) # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs) # So to respect the proper output we have to add this exception if "args" in output: if output["args"] is not None and type(output["args"]) == tf.Tensor: tensor_name = output["args"].name.split(":")[0] output[tensor_name] = output["args"] else: # `args` in this case is always the first parameter, then `input_ids` output["input_ids"] = output["args"] del output["args"] if "kwargs" in output: del output["kwargs"] boolean_dict = { k: v for k, v in output.items() if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"] } output.update( booleans_processing( config=config, **boolean_dict, ) ) return output def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): """ Detect missing and unexpected layers and load the TF weights accordingly to their names and shapes. Args: model (:obj:`tf.keras.models.Model`): The model to load the weights into. resolved_archive_file (:obj:`str`): The location of the H5 file. ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to ignore weights with shapes that don't match between the checkpoint of the model. Returns: Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the mismatched layers. """ missing_layers = [] unexpected_layers = [] mismatched_layers = [] # Read the H5 file with h5py.File(resolved_archive_file, "r") as f: # Retrieve the name of each layer from the H5 file saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names")) # Find the missing layers from the high level list of layers missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name) # Find the unexpected layers from the high level list of layers unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers])) saved_weight_names_set = set() symbolic_weights_names = set() weight_value_tuples = [] # Compute missing and unexpected sub layers # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...] 
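        # How the matching below works (summary): for each layer present in both
        # the model and the H5 file, saved weight names are normalized by dropping
        # the leading model-name scope (or re-rooting them under `_prefix`), then
        # compared against the layer's symbolic weight names. Matches are queued
        # as (variable, value) pairs for a single K.batch_set_value call; the set
        # differences yield the per-weight missing/unexpected names reported back.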
for layer in model.layers: # if layer_name from the H5 file belongs to the layers from the instantiated model if layer.name in saved_h5_model_layers_name: # Get the H5 layer object from its name h5_layer_object = f[layer.name] # Get all the weights as a list from the layer object symbolic_weights = layer.trainable_weights + layer.non_trainable_weights saved_weights = {} # Create a dict from the H5 saved model that looks like {"weight_name": weight_value} # And a set with only the names for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"): # TF names always start with the model name so we ignore it name = "/".join(weight_name.split("/")[1:]) if _prefix is not None: name = _prefix + "/" + name saved_weights[name] = np.asarray(h5_layer_object[weight_name]) # Add the updated name to the final list for computing missing/unexpected values saved_weight_names_set.add(name) # Loop over each weights from the instantiated model and compare with the weights from the H5 file for symbolic_weight in symbolic_weights: # TF names always start with the model name so we ignore it if _prefix is not None: delimeter = len(_prefix.split("/")) symbolic_weight_name = "/".join( symbolic_weight.name.split("/")[:delimeter] + symbolic_weight.name.split("/")[delimeter + 1 :] ) else: symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:]) # here we check if the current weight is among the weights from the H5 file # If yes, get the weight_value of the corresponding weight from the H5 file # If not, make the value to None saved_weight_value = saved_weights.get(symbolic_weight_name, None) # Add the updated name to the final list for computing missing/unexpected values symbolic_weights_names.add(symbolic_weight_name) # If the current weight is found if saved_weight_value is not None: # Check if the shape of the current weight and the one from the H5 file are different if K.int_shape(symbolic_weight) != saved_weight_value.shape: # If yes we reshape the weight from the H5 file accordingly to the current weight # If the two shapes are not compatible we raise an issue try: array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) except ValueError as e: if ignore_mismatched_sizes: mismatched_layers.append( (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight)) ) continue else: raise e else: array = saved_weight_value # We create the tuple that will be loaded and add it to the final list weight_value_tuples.append((symbolic_weight, array)) # Load all the weights K.batch_set_value(weight_value_tuples) # Compute the missing and unexpected layers missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set)) unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names)) return missing_layers, unexpected_layers, mismatched_layers def init_copy_embeddings(old_embeddings, new_num_tokens): r""" This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be kept or not. 
Example: - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4] - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1] - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5] - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4] """ old_num_tokens, old_embedding_dim = shape_list(old_embeddings) size_diff = new_num_tokens - old_num_tokens # initialize new embeddings # Copy token embeddings from the previous ones if tf.math.greater(size_diff, 0): # if the new size is greater than the old one, we extend the current embeddings with a padding until getting new size # and we create a mask to properly identify the padded values and be replaced by the values of the newly created # embeddings current_weights = tf.pad( old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1 ) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True) mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False) else: # if the new size if lower than the old one, we take the current embeddings until the new size current_weights = tf.slice( old_embeddings.value(), tf.convert_to_tensor([0, 0]), tf.convert_to_tensor([new_num_tokens, old_embedding_dim]), ) mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True) return mask, current_weights class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin): r""" Base class for all TF models. :class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: * resize the input embeddings, * prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture. - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. """ config_class = None base_model_prefix = "" # a list of re pattern of tensor names to ignore from the model when loading the model weights # (and avoid unnecessary warnings). _keys_to_ignore_on_load_missing = None # a list of re pattern of tensor names to ignore from the weights when loading the model weights # (and avoid unnecessary warnings). _keys_to_ignore_on_load_unexpected = None _requires_load_weight_prefix = False @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: """ Dummy inputs to build the network. Returns: :obj:`Dict[str, tf.Tensor]`: The dummy inputs. """ return { "input_ids": tf.constant(DUMMY_INPUTS), } def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) if not isinstance(config, PretrainedConfig): raise ValueError( f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " "`PretrainedConfig`. To create a model from a pretrained model use " f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" ) # Save config and origin of the pretrained weights if given in model self.config = config self.name_or_path = config.name_or_path @classmethod def _from_config(cls, config, **kwargs): """ All context managers that the model should be initialized under go here. 
""" return cls(config, **kwargs) @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), } ] ) def serving(self, inputs): """ Method used for serving the model. Args: inputs (:obj:`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. """ output = self.call(inputs) return self.serving_output(output) def serving_output(output): """ Prepare the output of the saved model. Each model must implement this function. Args: output (:obj:`~transformers.TFBaseModelOutput`): The output returned by the model. """ raise NotImplementedError def get_input_embeddings(self) -> tf.keras.layers.Layer: """ Returns the model's input embeddings layer. Returns: :obj:`tf.Variable`: The embeddings layer mapping vocabulary to hidden states. """ main_layer = getattr(self, self.base_model_prefix, self) if main_layer is not self: return main_layer.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value): """ Set model's input embeddings Args: value (:obj:`tf.Variable`): The new weights mapping hidden states to vocabulary. """ main_layer = getattr(self, self.base_model_prefix) if main_layer is None: raise NotImplementedError("The model does not implements the base_model_prefix attribute.") try: main_layer.set_input_embeddings(value) except AttributeError: logger.info("Building the model") self(self.dummy_inputs) main_layer.set_input_embeddings(value) def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]: """ Returns the model's output embeddings Returns: :obj:`tf.Variable`: The new weights mapping vocabulary to hidden states. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() return lm_head.get_output_embeddings() return None # Overwrite for models with output embeddings def set_output_embeddings(self, value): """ Set model's output embeddings Args: value (:obj:`tf.Variable`): The new weights mapping hidden states to vocabulary. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_output_embeddings(value) except AttributeError: logger.info("Building the model") self(self.dummy_inputs) lm_head.set_output_embeddings(value) def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]: """ Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings Return: :obj:`tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model. """ warnings.warn( "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning ) return self.get_lm_head() def get_prefix_bias_name(self) -> Union[None, str]: """ Get the concatenated _prefix name of the bias from the model name to the parent layer Return: :obj:`str`: The _prefix name of the bias. """ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return None def get_bias(self) -> Union[None, Dict[str, tf.Variable]]: """ Dict of bias attached to an LM head. The key represents the name of the bias attribute. Return: :obj:`tf.Variable`: The weights representing the bias, None if not an LM model. 
""" if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_bias() except AttributeError: self(self.dummy_inputs) return lm_head.get_bias() return None def set_bias(self, value): """ Set all the bias in the LM head. Args: value (:obj:`Dict[tf.Variable]`): All the new bias attached to an LM head. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_bias(value) except AttributeError: self(self.dummy_inputs) lm_head.set_bias(value) def get_lm_head(self) -> tf.keras.layers.Layer: """ The LM Head layer. This method must be overwritten by all the models that have a lm head. Return: :obj:`tf.keras.layers.Layer`: The LM head layer if the model has one, None if not. """ return None def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable: """ Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method. Arguments: new_num_tokens (:obj:`int`, `optional`): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens :obj:`tf.Variable` module of the model without doing anything. Return: :obj:`tf.Variable`: Pointer to the input tokens Embeddings Module of the model. """ if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self._get_word_embedding_weight(self.get_input_embeddings()) model_embeds = self._resize_token_embeddings(new_num_tokens) # Update base model and current model config self.config.vocab_size = new_num_tokens return model_embeds def _get_word_embedding_weight(model, embedding_layer): embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds # The reason why the attributes don't exist might be # because the model is not built, so retry getting # the argument after building the model model(model.dummy_inputs) embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds return None def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings()) new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) # if word embeddings are not tied, make sure that lm head bias is resized as well if self.get_bias() is not None: old_lm_head_bias = self.get_bias() new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) # if word embeddings are not tied, make sure that lm head decoder is resized as well if self.get_output_embeddings() is not None: old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) self.set_output_embeddings(new_lm_head_decoder) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): """ Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. 
Reducing the size will remove vectors from the end Args: old_lm_head_bias (:obj:`tf.Variable`): Old lm head bias to be resized. new_num_tokens (:obj:`int`, `optional`): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns None Return: :obj:`tf.Variable`: Pointer to the resized bias. """ new_lm_head_bias = {} for attr, weight in old_lm_head_bias.items(): first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) size_diff = new_num_tokens - old_num_tokens final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens] # initialize new bias if tf.math.greater(size_diff, 0): padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy] bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True) bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False) else: slice_from = [0] if first_dim is None else [0, 0] current_bias = tf.slice( weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape) ) bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True) new_bias = self.add_weight( shape=final_shape, initializer="zeros", trainable=True, name=weight.name.split(":")[0], ) init_bias = tf.where(bias_mask, current_bias, new_bias.value()) new_bias.assign(init_bias) new_lm_head_bias[attr] = new_bias return new_lm_head_bias def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens): """ Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_decoder (:obj:`tf.Variable`): Old lm head decoder to be resized. new_num_tokens (:obj:`int`, `optional`): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns None Return: :obj:`tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the input ones. """ new_lm_head_decoder = old_lm_head_decoder is_input_output_equals = tf.reduce_any( self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder ) if old_lm_head_decoder is not None and not is_input_output_equals: old_embedding_dim = shape_list(old_lm_head_decoder)[1] decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens) new_lm_head_decoder = self.add_weight( shape=(new_num_tokens, old_embedding_dim), initializer="zeros", trainable=True, name=old_lm_head_decoder.name.split(":")[0], ) init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value()) new_lm_head_decoder.assign(init_decoder) return new_lm_head_decoder def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable: """ Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (:obj:`tf.Variable`): Old embeddings to be resized. 
new_num_tokens (:obj:`int`, `optional`): New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens :obj:`tf.Variable`` module of the model without doing anything. Return: :obj:`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if :obj:`new_num_tokens` is :obj:`None` """ old_embedding_dim = shape_list(old_embeddings)[1] init_range = getattr(self.config, "initializer_range", 0.02) embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens) new_embeddings = self.add_weight( name=old_embeddings.name.split(":")[0], shape=[new_num_tokens, old_embedding_dim], initializer=get_initializer(init_range), dtype=tf.float32, ) init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value()) new_embeddings.assign(init_embeddings) return new_embeddings def prune_heads(self, heads_to_prune): """ Prunes heads of the base model. Arguments: heads_to_prune (:obj:`Dict[int, List[int]]`): Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ raise NotImplementedError def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the :func:`~transformers.TFPreTrainedModel.from_pretrained` class method. Arguments: save_directory (:obj:`str`): Directory to which to save. Will be created if it doesn't exist. saved_model (:obj:`bool`, `optional`, defaults to :obj:`False`): If the model has to be saved in saved model format as well or not. version (:obj:`int`, `optional`, defaults to 1): The version of the saved model. A saved model needs to be versioned in order to be properly loaded by TensorFlow Serving as detailed in the official documentation https://www.tensorflow.org/tfx/serving/serving_basic push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to push your model to the Hugging Face model hub after saving it. .. warning:: Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with :obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory instead. kwargs: Additional key word arguments passed along to the :meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method. 
""" if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo = self._create_or_get_repo(save_directory, **kwargs) os.makedirs(save_directory, exist_ok=True) if saved_model: saved_model_dir = os.path.join(save_directory, "saved_model", str(version)) self.save(saved_model_dir, include_optimizer=False, signatures=self.serving) logger.info(f"Saved model created in {saved_model_dir}") # Save configuration file self.config.architectures = [self.__class__.__name__[2:]] self.config.save_pretrained(save_directory) # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME) self.save_weights(output_model_file) logger.info(f"Model weights saved in {output_model_file}") if push_to_hub: url = self._push_to_hub(repo, commit_message=commit_message) logger.info(f"Model pushed to the hub in this commit: {url}") @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiate a pretrained TF 2.0 model from a pre-trained model configuration. The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (:obj:`str`, `optional`): Can be either: - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing model weights saved using :func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards. - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``). model_args (sequence of positional arguments, `optional`): All remaning positional arguments will be passed to the underlying model's ``__init__`` method. config (:obj:`Union[PretrainedConfig, str]`, `optional`): Can be either: - an instance of a class derived from :class:`~transformers.PretrainedConfig`, - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`. Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the `model id` string of a pretrained model). - The model was saved using :func:`~transformers.TFPreTrainedModel.save_pretrained` and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. 
from_pt: (:obj:`bool`, `optional`, defaults to :obj:`False`): Load the model weights from a PyTorch state_dict save file (see docstring of ``pretrained_model_name_or_path`` argument). ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels). cache_dir (:obj:`str`, `optional`): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies: (:obj:`Dict[str, str], `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to only look at local files (e.g., not try doanloading the model). use_auth_token (:obj:`str` or `bool`, `optional`): The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`). revision(:obj:`str`, `optional`, defaults to :obj:`"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any identifier allowed by git. mirror(:obj:`str`, `optional`): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. kwargs (remaining dictionary of keyword arguments, `optional`): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. .. note:: Passing :obj:`use_auth_token=True` is required when you want to use a private model. 
Examples:: >>> from transformers import BertConfig, TFBertModel >>> # Download model and configuration from huggingface.co and cache. >>> model = TFBertModel.from_pretrained('bert-base-uncased') >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable). >>> model = TFBertModel.from_pretrained('./test/saved_model/') >>> # Update configuration during loading. >>> model = TFBertModel.from_pretrained('bert-base-uncased', output_attentions=True) >>> assert model.config.output_attentions == True >>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable). >>> config = BertConfig.from_json_file('./pt_model/my_pt_model_config.json') >>> model = TFBertModel.from_pretrained('./pt_model/my_pytorch_model.bin', from_pt=True, config=config) """ config = kwargs.pop("config", None) cache_dir = kwargs.pop("cache_dir", None) from_pt = kwargs.pop("from_pt", False) ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", False) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) mirror = kwargs.pop("mirror", None) load_weight_prefix = kwargs.pop("load_weight_prefix", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs, ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if os.path.isdir(pretrained_model_name_or_path): if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint in priority if from_pt archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) else: raise EnvironmentError( f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME]} found in directory " f"{pretrained_model_name_or_path} or `from_pt` set to False" ) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): archive_file = pretrained_model_name_or_path + ".index" else: archive_file = hf_bucket_url( pretrained_model_name_or_path, filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME), 
revision=revision, mirror=mirror, ) try: # Load from URL or cache if already cached resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, ) except EnvironmentError as err: logger.error(err) msg = ( f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n" f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n" f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n" ) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info(f"loading weights file {archive_file}") else: logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}") else: resolved_archive_file = None config.name_or_path = pretrained_model_name_or_path # composed models, *e.g.* TFRag, require special treatment when it comes to loading # pre-trained weights. if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None: model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name") # Instantiate model. model = cls(config, *model_args, **model_kwargs) if from_pt: from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model # Load from a PyTorch checkpoint return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True) # we might need to extend the variable scope for composite models if load_weight_prefix is not None: with tf.compat.v1.variable_scope(load_weight_prefix): model(model.dummy_inputs) # build the network with dummy inputs else: model(model.dummy_inputs) # build the network with dummy inputs assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}" # 'by_name' allow us to do transfer learning by skipping/adding layers # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357 try: missing_keys, unexpected_keys, mismatched_keys = load_tf_weights( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix, ) except OSError: raise OSError( "Unable to load weights from h5 file. " "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. " ) model(model.dummy_inputs) # Make sure restore ops are run if cls._keys_to_ignore_on_load_missing is not None: for pat in cls._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning( f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when " f"initializing {model.__class__.__name__}: {unexpected_keys}\n" f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task " f"or with another architecture (e.g. 
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF general model utils."""

import functools
import inspect
import os
import re
import warnings
from typing import Dict, List, Optional, Union

import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.saving import hdf5_format

from .configuration_utils import PretrainedConfig
from .file_utils import (
    DUMMY_INPUTS,
    TF2_WEIGHTS_NAME,
    WEIGHTS_NAME,
    ModelOutput,
    PushToHubMixin,
    cached_path,
    copy_func,
    hf_bucket_url,
    is_offline_mode,
    is_remote_url,
)
from .generation_tf_utils import TFGenerationMixin
from .tokenization_utils_base import BatchEncoding
from .utils import logging


logger = logging.get_logger(__name__)
tf_logger = tf.get_logger()

TFModelInputType = Union[
    List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], np.ndarray, tf.Tensor
]


class TFModelUtilsMixin:
    """
    A few utilities for :obj:`tf.keras.Model`, to be used as a mixin.
    """

    def num_parameters(self, only_trainable: bool = False) -> int:
        """
        Get the number of (optionally, trainable) parameters in the model.

        Args:
            only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of trainable parameters

        Returns:
            :obj:`int`: The number of parameters.
        """
        if only_trainable:
            return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables))
        else:
            return self.count_params()


def keras_serializable(cls):
    """
    Decorate a Keras Layer class to support Keras serialization.

    This is done by:

    1. Adding a :obj:`transformers_config` dict to the Keras config dictionary in :obj:`get_config` (called by Keras
       at serialization time).
    2. Wrapping :obj:`__init__` to accept that :obj:`transformers_config` dict (passed by Keras at deserialization
       time) and convert it to a config object for the actual layer initializer.
    3. Registering the class as a custom object in Keras (if the TensorFlow version supports this), so that it does
       not need to be supplied in :obj:`custom_objects` in the call to :obj:`tf.keras.models.load_model`.

    Args:
        cls (a :obj:`tf.keras.layers.Layer` subclass):
            Typically a :obj:`TF.MainLayer` class in this project, in general must accept a :obj:`config` argument to
            its initializer.

    Returns:
        The same class object, with modifications for Keras deserialization.
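
    Example (a hedged sketch; :obj:`MyConfig` and :obj:`MyMainLayer` are hypothetical names, not classes shipped
    with the library)::

        @keras_serializable
        class MyMainLayer(tf.keras.layers.Layer):
            config_class = MyConfig

            def __init__(self, config, **kwargs):
                super().__init__(**kwargs)
                self.config = config

    Once decorated, a Keras model built from this layer can be saved and restored with
    :obj:`tf.keras.models.load_model` without manually passing :obj:`custom_objects`.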
""" initializer = cls.__init__ config_class = getattr(cls, "config_class", None) if config_class is None: raise AttributeError("Must set `config_class` to use @keras_serializable") @functools.wraps(initializer) def wrapped_init(self, *args, **kwargs): config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None) if isinstance(config, dict): config = config_class.from_dict(config) initializer(self, config, *args, **kwargs) elif isinstance(config, PretrainedConfig): if len(args) > 0: initializer(self, *args, **kwargs) else: initializer(self, config, *args, **kwargs) else: raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)") self._config = config self._kwargs = kwargs cls.__init__ = wrapped_init if not hasattr(cls, "get_config"): raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses") if hasattr(cls.get_config, "_is_default"): def get_config(self): cfg = super(cls, self).get_config() cfg["config"] = self._config.to_dict() cfg.update(self._kwargs) return cfg cls.get_config = get_config cls._keras_serializable = True if hasattr(tf.keras.utils, "register_keras_serializable"): cls = tf.keras.utils.register_keras_serializable()(cls) return cls class TFCausalLanguageModelingLoss: """ Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 affect the loss active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) class TFQuestionAnsweringLoss: """ Loss function suitable for question answering. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) start_loss = loss_fn(labels["start_position"], logits[0]) end_loss = loss_fn(labels["end_position"], logits[1]) return (start_loss + end_loss) / 2.0 class TFTokenClassificationLoss: """ Loss function suitable for token classification. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 # are taken into account as loss if tf.math.reduce_any(labels == -1): warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.") active_loss = tf.reshape(labels, (-1,)) != -1 else: active_loss = tf.reshape(labels, (-1,)) != -100 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) class TFSequenceClassificationLoss: """ Loss function suitable for sequence classification. 
""" def compute_loss(self, labels, logits): if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1: loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE) else: loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) return loss_fn(labels, logits) class TFMultipleChoiceLoss(TFSequenceClassificationLoss): """Loss function suitable for multiple choice tasks.""" class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss): """ Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ class TFNextSentencePredictionLoss: """ Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. """ def compute_loss(self, labels, logits): loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE ) # make sure only labels that are not equal to -100 # are taken into account as loss next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss) next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss) return loss_fn(next_sentence_label, next_sentence_reduced_logits) def booleans_processing(config, **kwargs): """ Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or graph) Args: config (:class:`~transformers.PretrainedConfig`): The config of the running model. **kwargs: The boolean parameters Returns: A dictionary with the proper values for each boolean """ final_booleans = {} if tf.executing_eagerly(): final_booleans["output_attentions"] = ( kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions ) final_booleans["output_hidden_states"] = ( kwargs["output_hidden_states"] if kwargs["output_hidden_states"] is not None else config.output_hidden_states ) final_booleans["return_dict"] = ( kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict ) if "use_cache" in kwargs: final_booleans["use_cache"] = kwargs["use_cache"] if kwargs["use_cache"] is not None else config.use_cache else: if ( kwargs["output_attentions"] is not None or kwargs["output_hidden_states"] is not None or ("use_cache" in kwargs and kwargs["use_cache"] is not None) ): tf_logger.warning( "The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model." "They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)." ) final_booleans["output_attentions"] = config.output_attentions final_booleans["output_hidden_states"] = config.output_hidden_states if kwargs["return_dict"] is not None: tf_logger.warning( "The parameter `return_dict` cannot be set in graph mode and will always be set to `True`." ) final_booleans["return_dict"] = True if "use_cache" in kwargs: final_booleans["use_cache"] = config.use_cache return final_booleans def input_processing(func, config, input_ids, **kwargs): """ Process the input of each TensorFlow model including the booleans. 

    In case of a list of symbolic inputs, each input has to be named according to the parameter names, i.e.
    `input_ids = tf.keras.Input(shape=(128,), dtype='int32', name="input_ids")`, otherwise the order of the tensors
    will not be guaranteed during the training.

    Args:
        func (:obj:`callable`):
            The callable function of the TensorFlow model.
        config (:class:`~transformers.PretrainedConfig`):
            The config of the running model.
        **kwargs:
            The inputs of the model.

    Returns:
        A dictionary with the proper values for each input.
    """
    signature = dict(inspect.signature(func).parameters)
    signature.pop("kwargs", None)
    signature.pop("self", None)
    parameter_names = list(signature.keys())
    output = {}
    allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)

    if "inputs" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
            FutureWarning,
        )

        output["input_ids"] = kwargs["kwargs_call"].pop("inputs")

    if "decoder_cached_states" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use "
            "`past_key_values` instead.",
            FutureWarning,
        )
        output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")

    if len(kwargs["kwargs_call"]) > 0:
        raise ValueError(
            f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
        )

    kwargs.pop("kwargs_call")

    for k, v in kwargs.items():
        if isinstance(v, allowed_types) or v is None:
            output[k] = v
        else:
            raise ValueError(f"Data of type {type(v)} is not allowed; only {allowed_types} are accepted for {k}.")

    if isinstance(input_ids, (tuple, list)):
        for i, input in enumerate(input_ids):
            # EagerTensors don't allow to use the .name property so we check for a real Tensor
            if type(input) == tf.Tensor:
                # Tensor names have always the pattern `name:id` then we check only the
                # `name` part
                tensor_name = input.name.split(":")[0]

                if tensor_name in parameter_names:
                    output[tensor_name] = input
                else:
                    output[parameter_names[i]] = input
            elif isinstance(input, allowed_types) or input is None:
                output[parameter_names[i]] = input
            else:
                raise ValueError(
                    f"Data of type {type(input)} is not allowed; only {allowed_types} are accepted for "
                    f"{parameter_names[i]}."
                )
    elif isinstance(input_ids, (dict, BatchEncoding)):
        if "inputs" in input_ids:
            warnings.warn(
                "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` "
                "instead.",
                FutureWarning,
            )

            output["input_ids"] = input_ids.pop("inputs")

        if "decoder_cached_states" in input_ids:
            warnings.warn(
                "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use "
                "`past_key_values` instead.",
                FutureWarning,
            )
            output["past_key_values"] = input_ids.pop("decoder_cached_states")

        for k, v in dict(input_ids).items():
            if isinstance(v, allowed_types) or v is None:
                output[k] = v
            elif k not in parameter_names and "args" not in parameter_names:
                logger.warning(
                    f"The parameter {k} does not belong to the parameter list {parameter_names} and will be ignored."
                )
                continue
            else:
                raise ValueError(f"Data of type {type(v)} is not allowed; only {allowed_types} are accepted for {k}.")
    else:
        if isinstance(input_ids, tf.Tensor) or input_ids is None:
            output[parameter_names[0]] = input_ids
        else:
            raise ValueError(
                f"Data of type {type(input_ids)} is not allowed; only {allowed_types} are accepted for "
                f"{parameter_names[0]}."
            )

    for name in parameter_names:
        if name not in list(output.keys()) and name != "args":
            output[name] = kwargs.pop(name, signature[name].default)

    # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
    # So to respect the proper output we have to add this exception
    if "args" in output:
        if output["args"] is not None and type(output["args"]) == tf.Tensor:
            tensor_name = output["args"].name.split(":")[0]
            output[tensor_name] = output["args"]
        else:
            # `args` in this case is always the first parameter, then `input_ids`
            output["input_ids"] = output["args"]

        del output["args"]

    if "kwargs" in output:
        del output["kwargs"]

    boolean_dict = {
        k: v
        for k, v in output.items()
        if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
    }

    output.update(
        booleans_processing(
            config=config,
            **boolean_dict,
        )
    )

    return output


def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
    """
    Detect missing and unexpected layers and load the TF weights according to their names and shapes.

    Args:
        model (:obj:`tf.keras.models.Model`):
            The model to load the weights into.
        resolved_archive_file (:obj:`str`):
            The location of the H5 file.
        ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to ignore weights with shapes that don't match between the checkpoint and the model.

    Returns:
        Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
        mismatched layers.
    """
    missing_layers = []
    unexpected_layers = []
    mismatched_layers = []

    # Read the H5 file
    with h5py.File(resolved_archive_file, "r") as f:
        # Retrieve the name of each layer from the H5 file
        saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))

        # Find the missing layers from the high level list of layers
        missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)

        # Find the unexpected layers from the high level list of layers
        unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
        saved_weight_names_set = set()
        symbolic_weights_names = set()
        weight_value_tuples = []

        # Compute missing and unexpected sub layers
        # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
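        # Note (descriptive summary of the loop below): the matching is purely name-based. For every layer present
        # in both the model and the H5 file, each symbolic weight is paired with its saved value after stripping the
        # leading model-name scope (and re-adding `_prefix` when one is given); weights that cannot be paired are
        # collected into the missing/unexpected lists returned at the end.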
        for layer in model.layers:
            # if layer_name from the H5 file belongs to the layers from the instantiated model
            if layer.name in saved_h5_model_layers_name:
                # Get the H5 layer object from its name
                h5_layer_object = f[layer.name]
                # Get all the weights as a list from the layer object
                symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
                saved_weights = {}

                # Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
                # And a set with only the names
                for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
                    # TF names always start with the model name so we ignore it
                    name = "/".join(weight_name.split("/")[1:])

                    if _prefix is not None:
                        name = _prefix + "/" + name

                    saved_weights[name] = np.asarray(h5_layer_object[weight_name])

                    # Add the updated name to the final list for computing missing/unexpected values
                    saved_weight_names_set.add(name)

                # Loop over each weight from the instantiated model and compare with the weights from the H5 file
                for symbolic_weight in symbolic_weights:
                    # TF names always start with the model name so we ignore it
                    if _prefix is not None:
                        delimiter = len(_prefix.split("/"))
                        symbolic_weight_name = "/".join(
                            symbolic_weight.name.split("/")[:delimiter]
                            + symbolic_weight.name.split("/")[delimiter + 1 :]
                        )
                    else:
                        symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])

                    # here we check if the current weight is among the weights from the H5 file
                    # If yes, get the weight_value of the corresponding weight from the H5 file
                    # If not, set the value to None
                    saved_weight_value = saved_weights.get(symbolic_weight_name, None)

                    # Add the updated name to the final list for computing missing/unexpected values
                    symbolic_weights_names.add(symbolic_weight_name)

                    # If the current weight is found
                    if saved_weight_value is not None:
                        # Check if the shape of the current weight and the one from the H5 file are different
                        if K.int_shape(symbolic_weight) != saved_weight_value.shape:
                            # If yes we reshape the weight from the H5 file to match the current weight
                            # If the two shapes are not compatible we raise an error
                            try:
                                array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
                            except ValueError as e:
                                if ignore_mismatched_sizes:
                                    mismatched_layers.append(
                                        (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
                                    )
                                    continue
                                else:
                                    raise e
                        else:
                            array = saved_weight_value

                        # We create the tuple that will be loaded and add it to the final list
                        weight_value_tuples.append((symbolic_weight, array))

    # Load all the weights
    K.batch_set_value(weight_value_tuples)

    # Compute the missing and unexpected layers
    missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
    unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))

    return missing_layers, unexpected_layers, mismatched_layers


def init_copy_embeddings(old_embeddings, new_num_tokens):
    r"""
    This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case
    new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be
    kept or not.
    Example:

        - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4]

            - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1]

        - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5]

            - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]
    """
    old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
    size_diff = new_num_tokens - old_num_tokens

    # initialize new embeddings
    # Copy token embeddings from the previous ones
    if tf.math.greater(size_diff, 0):
        # if the new size is greater than the old one, we extend the current embeddings with padding until the new
        # size is reached, and we create a mask to properly identify the padded values that will be replaced by the
        # values of the newly created embeddings
        current_weights = tf.pad(
            old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
        )
        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
        mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
        mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
    else:
        # if the new size is lower than the old one, we take the current embeddings until the new size
        current_weights = tf.slice(
            old_embeddings.value(),
            tf.convert_to_tensor([0, 0]),
            tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
        )
        mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)

    return mask, current_weights


class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
    r"""
    Base class for all TF models.

    :class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles
    methods for loading, downloading and saving models as well as a few methods common to all models to:

        * resize the input embeddings,
        * prune heads in the self-attention layers.

    Class attributes (overridden by derived classes):

        - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
          :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
        - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
          derived classes of the same architecture adding modules on top of the base model.
    """
    config_class = None
    base_model_prefix = ""
    # a list of re patterns of tensor names to ignore from the model when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_missing = None
    # a list of re patterns of tensor names to ignore from the weights when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_unexpected = None
    _requires_load_weight_prefix = False

    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network.

        Returns:
            :obj:`Dict[str, tf.Tensor]`: The dummy inputs.
        """
        return {
            "input_ids": tf.constant(DUMMY_INPUTS),
        }

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                "`PretrainedConfig`. To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path

    @classmethod
    def _from_config(cls, config, **kwargs):
        """
        All context managers that the model should be initialized under go here.
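
        Example (illustrative)::

            >>> from transformers import BertConfig, TFBertModel
            >>> config = BertConfig()
            >>> model = TFBertModel._from_config(config)  # weights are randomly initialized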
""" return cls(config, **kwargs) @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), } ] ) def serving(self, inputs): """ Method used for serving the model. Args: inputs (:obj:`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. """ output = self.call(inputs) return self.serving_output(output) def serving_output(output): """ Prepare the output of the saved model. Each model must implement this function. Args: output (:obj:`~transformers.TFBaseModelOutput`): The output returned by the model. """ raise NotImplementedError def get_input_embeddings(self) -> tf.keras.layers.Layer: """ Returns the model's input embeddings layer. Returns: :obj:`tf.Variable`: The embeddings layer mapping vocabulary to hidden states. """ main_layer = getattr(self, self.base_model_prefix, self) if main_layer is not self: return main_layer.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value): """ Set model's input embeddings Args: value (:obj:`tf.Variable`): The new weights mapping hidden states to vocabulary. """ main_layer = getattr(self, self.base_model_prefix) if main_layer is None: raise NotImplementedError("The model does not implements the base_model_prefix attribute.") try: main_layer.set_input_embeddings(value) except AttributeError: logger.info("Building the model") self(self.dummy_inputs) main_layer.set_input_embeddings(value) def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]: """ Returns the model's output embeddings Returns: :obj:`tf.Variable`: The new weights mapping vocabulary to hidden states. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() return lm_head.get_output_embeddings() return None # Overwrite for models with output embeddings def set_output_embeddings(self, value): """ Set model's output embeddings Args: value (:obj:`tf.Variable`): The new weights mapping hidden states to vocabulary. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_output_embeddings(value) except AttributeError: logger.info("Building the model") self(self.dummy_inputs) lm_head.set_output_embeddings(value) def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]: """ Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings Return: :obj:`tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model. """ warnings.warn( "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning ) return self.get_lm_head() def get_prefix_bias_name(self) -> Union[None, str]: """ Get the concatenated _prefix name of the bias from the model name to the parent layer Return: :obj:`str`: The _prefix name of the bias. """ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return None def get_bias(self) -> Union[None, Dict[str, tf.Variable]]: """ Dict of bias attached to an LM head. The key represents the name of the bias attribute. Return: :obj:`tf.Variable`: The weights representing the bias, None if not an LM model. 
""" if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_bias() except AttributeError: self(self.dummy_inputs) return lm_head.get_bias() return None def set_bias(self, value): """ Set all the bias in the LM head. Args: value (:obj:`Dict[tf.Variable]`): All the new bias attached to an LM head. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_bias(value) except AttributeError: self(self.dummy_inputs) lm_head.set_bias(value) def get_lm_head(self) -> tf.keras.layers.Layer: """ The LM Head layer. This method must be overwritten by all the models that have a lm head. Return: :obj:`tf.keras.layers.Layer`: The LM head layer if the model has one, None if not. """ return None def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable: """ Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method. Arguments: new_num_tokens (:obj:`int`, `optional`): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens :obj:`tf.Variable` module of the model without doing anything. Return: :obj:`tf.Variable`: Pointer to the input tokens Embeddings Module of the model. """ if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self._get_word_embedding_weight(self.get_input_embeddings()) model_embeds = self._resize_token_embeddings(new_num_tokens) # Update base model and current model config self.config.vocab_size = new_num_tokens return model_embeds def _get_word_embedding_weight(model, embedding_layer): embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds # The reason why the attributes don't exist might be # because the model is not built, so retry getting # the argument after building the model model(model.dummy_inputs) embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds return None def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings()) new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) # if word embeddings are not tied, make sure that lm head bias is resized as well if self.get_bias() is not None: old_lm_head_bias = self.get_bias() new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) # if word embeddings are not tied, make sure that lm head decoder is resized as well if self.get_output_embeddings() is not None: old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) self.set_output_embeddings(new_lm_head_decoder) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): """ Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. 
        Reducing the size will remove vectors from the end.

        Args:
            old_lm_head_bias (:obj:`tf.Variable`):
                Old lm head bias to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at
                the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just
                returns None.

        Return:
            :obj:`tf.Variable`: Pointer to the resized bias.
        """
        new_lm_head_bias = {}

        for attr, weight in old_lm_head_bias.items():
            first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
            size_diff = new_num_tokens - old_num_tokens
            final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]

            # initialize new bias
            if tf.math.greater(size_diff, 0):
                padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
                current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
                num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
                mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
                bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
                bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
            else:
                slice_from = [0] if first_dim is None else [0, 0]
                current_bias = tf.slice(
                    weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)
                )
                bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)

            new_bias = self.add_weight(
                shape=final_shape,
                initializer="zeros",
                trainable=True,
                name=weight.name.split(":")[0],
            )
            init_bias = tf.where(bias_mask, current_bias, new_bias.value())

            new_bias.assign(init_bias)
            new_lm_head_bias[attr] = new_bias

        return new_lm_head_bias

    def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):
        """
        Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end.
        Reducing the size will remove vectors from the end.

        Args:
            old_lm_head_decoder (:obj:`tf.Variable`):
                Old lm head decoder to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at
                the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just
                returns None.

        Return:
            :obj:`tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from
            the input ones.
        """
        new_lm_head_decoder = old_lm_head_decoder
        is_input_output_equals = tf.reduce_any(
            self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder
        )

        if old_lm_head_decoder is not None and not is_input_output_equals:
            old_embedding_dim = shape_list(old_lm_head_decoder)[1]
            decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)
            new_lm_head_decoder = self.add_weight(
                shape=(new_num_tokens, old_embedding_dim),
                initializer="zeros",
                trainable=True,
                name=old_lm_head_decoder.name.split(":")[0],
            )
            init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())

            new_lm_head_decoder.assign(init_decoder)

        return new_lm_head_decoder

    def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
        """
        Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end.

        Args:
            old_embeddings (:obj:`tf.Variable`):
                Old embeddings to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors
                at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just
                returns a pointer to the input tokens :obj:`tf.Variable` module of the model without doing anything.

        Return:
            :obj:`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        old_embedding_dim = shape_list(old_embeddings)[1]
        init_range = getattr(self.config, "initializer_range", 0.02)
        embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
        new_embeddings = self.add_weight(
            name=old_embeddings.name.split(":")[0],
            shape=[new_num_tokens, old_embedding_dim],
            initializer=get_initializer(init_range),
            dtype=tf.float32,
        )
        init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())

        new_embeddings.assign(init_embeddings)

        return new_embeddings

    def prune_heads(self, heads_to_prune):
        """
        Prunes heads of the base model.

        Arguments:
            heads_to_prune (:obj:`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list
                of heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune
                heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
        """
        raise NotImplementedError

    def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        :func:`~transformers.TFPreTrainedModel.from_pretrained` class method.

        Arguments:
            save_directory (:obj:`str`):
                Directory to which to save. Will be created if it doesn't exist.
            saved_model (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also save the model in SavedModel format.
            version (:obj:`int`, `optional`, defaults to 1):
                The version of the saved model. A saved model needs to be versioned in order to be properly loaded by
                TensorFlow Serving as detailed in the official documentation
                https://www.tensorflow.org/tfx/serving/serving_basic
            push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to push your model to the Hugging Face model hub after saving it.

                .. warning::

                    Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
                    :obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you
                    are pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary
                    directory instead.

            kwargs:
                Additional keyword arguments passed along to the
                :meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
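
        Example (an illustrative sketch; the path is hypothetical)::

            >>> from transformers import TFBertModel
            >>> model = TFBertModel.from_pretrained("bert-base-uncased")
            >>> model.save_pretrained("./my_model_directory/")
            >>> # The directory now contains `config.json` and `tf_model.h5` and can be reloaded with
            >>> # `TFBertModel.from_pretrained("./my_model_directory/")`.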
""" if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo = self._create_or_get_repo(save_directory, **kwargs) os.makedirs(save_directory, exist_ok=True) if saved_model: saved_model_dir = os.path.join(save_directory, "saved_model", str(version)) self.save(saved_model_dir, include_optimizer=False, signatures=self.serving) logger.info(f"Saved model created in {saved_model_dir}") # Save configuration file self.config.architectures = [self.__class__.__name__[2:]] self.config.save_pretrained(save_directory) # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME) self.save_weights(output_model_file) logger.info(f"Model weights saved in {output_model_file}") if push_to_hub: url = self._push_to_hub(repo, commit_message=commit_message) logger.info(f"Model pushed to the hub in this commit: {url}") @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiate a pretrained TF 2.0 model from a pre-trained model configuration. The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (:obj:`str`, `optional`): Can be either: - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing model weights saved using :func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards. - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``). model_args (sequence of positional arguments, `optional`): All remaning positional arguments will be passed to the underlying model's ``__init__`` method. config (:obj:`Union[PretrainedConfig, str]`, `optional`): Can be either: - an instance of a class derived from :class:`~transformers.PretrainedConfig`, - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`. Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the `model id` string of a pretrained model). - The model was saved using :func:`~transformers.TFPreTrainedModel.save_pretrained` and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. 
            from_pt (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a PyTorch state_dict save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to ignore weights whose size in the checkpoint does not match the size in the model
                (e.g., when instantiating a model with 10 labels from a checkpoint with 3 labels). When :obj:`True`,
                mismatched weights are skipped and reported instead of raising an error.
            cache_dir (:obj:`str`, `optional`):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding
                the cached versions if they exist.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error
                messages.
            local_files_only (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            use_auth_token (:obj:`str` or `bool`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
            revision (:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use
                a git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be
                any identifier allowed by git.
            mirror (:obj:`str`, `optional`):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or
                safety. Please refer to the mirror site for more information.
            kwargs (remaining dictionary of keyword arguments, `optional`):
                Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
                :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
                automatically loaded:

                    - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
                      underlying model's ``__init__`` method (we assume all relevant updates to the configuration
                      have already been done)
                    - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
                      initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
                      ``kwargs`` that corresponds to a configuration attribute will be used to override said
                      attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any
                      configuration attribute will be passed to the underlying model's ``__init__`` function.

        .. note::

            Passing :obj:`use_auth_token=True` is required when you want to use a private model.
        Examples::

            >>> from transformers import BertConfig, TFBertModel
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = TFBertModel.from_pretrained('bert-base-uncased')
            >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
            >>> model = TFBertModel.from_pretrained('./test/saved_model/')
            >>> # Update configuration during loading.
            >>> model = TFBertModel.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> assert model.config.output_attentions == True
            >>> # Loading from a PyTorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).
            >>> config = BertConfig.from_json_file('./pt_model/my_pt_model_config.json')
            >>> model = TFBertModel.from_pretrained('./pt_model/my_pytorch_model.bin', from_pt=True, config=config)
        """
        config = kwargs.pop("config", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_pt = kwargs.pop("from_pt", False)
        ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        mirror = kwargs.pop("mirror", None)
        load_weight_prefix = kwargs.pop("load_weight_prefix", None)
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)

        user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                *model_args,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                _from_auto=from_auto_class,
                _from_pipeline=from_pipeline,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        # Load model
        if pretrained_model_name_or_path is not None:
            if os.path.isdir(pretrained_model_name_or_path):
                if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    # Load from a PyTorch checkpoint in priority if from_pt
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                else:
                    raise EnvironmentError(
                        f"Error: no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME]} found in directory "
                        f"{pretrained_model_name_or_path}, or `from_pt` is set to False"
                    )
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
                archive_file = pretrained_model_name_or_path
            elif os.path.isfile(pretrained_model_name_or_path + ".index"):
                archive_file = pretrained_model_name_or_path + ".index"
            else:
                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME),
                    revision=revision,
                    mirror=mirror,
                )

            try:
                # Load from URL or cache if already cached
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                )
            except EnvironmentError as err:
                logger.error(err)
                msg = (
                    f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                    f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
                    f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n"
                )
                raise EnvironmentError(msg)

            if resolved_archive_file == archive_file:
                logger.info(f"loading weights file {archive_file}")
            else:
                logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
        else:
            resolved_archive_file = None

        config.name_or_path = pretrained_model_name_or_path

        # composed models, *e.g.* TFRag, require special treatment when it comes to loading
        # pre-trained weights.
        if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None:
            model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name")

        # Instantiate model.
        model = cls(config, *model_args, **model_kwargs)

        if from_pt:
            from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model

            # Load from a PyTorch checkpoint
            return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)

        # we might need to extend the variable scope for composite models
        if load_weight_prefix is not None:
            with tf.compat.v1.variable_scope(load_weight_prefix):
                model(model.dummy_inputs)  # build the network with dummy inputs
        else:
            model(model.dummy_inputs)  # build the network with dummy inputs

        assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}"
        # 'by_name' allow us to do transfer learning by skipping/adding layers
        # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
        try:
            missing_keys, unexpected_keys, mismatched_keys = load_tf_weights(
                model,
                resolved_archive_file,
                ignore_mismatched_sizes=ignore_mismatched_sizes,
                _prefix=load_weight_prefix,
            )
        except OSError:
            raise OSError(
                "Unable to load weights from h5 file. "
                "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
            )

        model(model.dummy_inputs)  # Make sure restore ops are run

        if cls._keys_to_ignore_on_load_missing is not None:
            for pat in cls._keys_to_ignore_on_load_missing:
                missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

        if cls._keys_to_ignore_on_load_unexpected is not None:
            for pat in cls._keys_to_ignore_on_load_unexpected:
                unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when "
                f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a "
                f"BertForPreTraining model).\n"
                f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
            )
        else:
            logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")

        if len(missing_keys) > 0:
            logger.warning(
                f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized: {missing_keys}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        elif len(mismatched_keys) == 0:
            logger.warning(
                f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                f"If your task is similar to the task the model of the checkpoint was trained on, "
                f"you can already use {model.__class__.__name__} for predictions without further training."
            )

        if len(mismatched_keys) > 0:
            mismatched_warning = "\n".join(
                [
                    f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
                    for key, shape1, shape2 in mismatched_keys
                ]
            )
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )

        if output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "mismatched_keys": mismatched_keys,
            }

            return model, loading_info

        return model


# To update the docstring, we need to copy the method, otherwise we change the original docstring.
TFPreTrainedModel.push_to_hub = copy_func(TFPreTrainedModel.push_to_hub)
TFPreTrainedModel.push_to_hub.__doc__ = TFPreTrainedModel.push_to_hub.__doc__.format(
    object="model", object_class="TFAutoModel", object_files="model checkpoint"
)


class TFConv1D(tf.keras.layers.Layer):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (:obj:`int`):
            The number of output features.
        nx (:obj:`int`):
            The number of input features.
        initializer_range (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation to use to initialize the weights.
        kwargs:
            Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
    """

    def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.nf = nf
        self.nx = nx
        self.initializer_range = initializer_range

    def build(self, input_shape):
        self.weight = self.add_weight(
            "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
        )
        self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())

    def call(self, x):
        bz, sl = shape_list(x)[:2]

        x = tf.reshape(x, [-1, self.nx])
        x = tf.matmul(x, self.weight) + self.bias

        x = tf.reshape(x, [bz, sl, self.nf])

        return x


class TFSharedEmbeddings(tf.keras.layers.Layer):
    r"""
    Construct shared token embeddings.

    The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
    modeling.
Args: vocab_size (:obj:`int`): The size of the vocabulary, e.g., the number of unique tokens. hidden_size (:obj:`int`): The size of the embedding vectors. initializer_range (:obj:`float`, `optional`): The standard deviation to use when initializing the weights. If no value is provided, it will default to :math:`1/\sqrt{hidden\_size}`. kwargs: Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`. """ def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range def build(self, input_shape): """ Build shared token embedding layer Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ self.weight = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range) ) super().build(input_shape) def get_config(self): config = { "vocab_size": self.vocab_size, "hidden_size": self.hidden_size, "initializer_range": self.initializer_range, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor: """ Get token embeddings of inputs or decode final hidden state. Args: inputs (:obj:`tf.Tensor`): In embedding mode, should be an int64 tensor with shape :obj:`[batch_size, length]`. In linear mode, should be a float tensor with shape :obj:`[batch_size, length, hidden_size]`. mode (:obj:`str`, defaults to :obj:`"embedding"`): A valid value is either :obj:`"embedding"` or :obj:`"linear"`, the first one indicates that the layer should be used as an embedding layer, the second one that the layer should be used as a linear decoder. Returns: :obj:`tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape :obj:`[batch_size, length, embedding_size]`. In linear mode, the output is a float32 with shape :obj:`[batch_size, length, vocab_size]`. Raises: ValueError: if :obj:`mode` is not valid. Shared weights logic is adapted from `here <https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24>`__. """ if mode == "embedding": return self._embedding(inputs) elif mode == "linear": return self._linear(inputs) else: raise ValueError(f"mode {mode} is not valid.") def _embedding(self, input_ids): """Applies embedding based on inputs tensor.""" return tf.gather(self.weight, input_ids) def _linear(self, inputs): """ Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [..., hidden_size] Returns: float32 tensor with shape [..., vocab_size]. """ first_dims = shape_list(inputs)[:-1] x = tf.reshape(inputs, [-1, self.hidden_size]) logits = tf.matmul(x, self.weight, transpose_b=True) return tf.reshape(logits, first_dims + [self.vocab_size]) class TFSequenceSummary(tf.keras.layers.Layer): """ Compute a single vector summary of a sequence hidden states. Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (:obj:`str`) -- The method to use to make this summary. 
Accepted values are: - :obj:`"last"` -- Take the last token hidden state (like XLNet) - :obj:`"first"` -- Take the first token hidden state (like Bert) - :obj:`"mean"` -- Take the mean of all tokens hidden states - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - :obj:`"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`). - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the output, another string or :obj:`None` will add no activation. - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (:obj:`float`)-- Optional dropout probability after the projection and activation. initializer_range (:obj:`float`, defaults to 0.02): The standard deviation to use to initialize the weights. kwargs: Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`. """ def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs): super().__init__(**kwargs) self.summary_type = config.summary_type if hasattr(config, "summary_use_proj") else "last" if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj if self.has_summary: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = tf.keras.layers.Dense( num_classes, kernel_initializer=get_initializer(initializer_range), name="summary" ) self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh" if self.has_activation: self.activation = tf.keras.activations.tanh self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0 if self.has_first_dropout: self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout) self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0 if self.has_last_dropout: self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout) def call(self, inputs, cls_index=None, training=False): if not isinstance(inputs, (dict, tuple, list)): hidden_states = inputs elif isinstance(inputs, (tuple, list)): hidden_states = inputs[0] cls_index = inputs[1] if len(inputs) > 1 else None assert len(inputs) <= 2, "Too many inputs." else: hidden_states = inputs.get("hidden_states") cls_index = inputs.get("cls_index", None) if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = tf.reduce_mean(hidden_states, axis=1) elif self.summary_type == "cls_index": hidden_shape = shape_list(hidden_states) # e.g. 
[batch, num choices, seq length, hidden dims]
            if cls_index is None:
                cls_index = tf.fill(
                    hidden_shape[:-2], hidden_shape[-2] - 1
                )  # A tensor full of shape [batch] or [batch, num choices] full of sequence length
            cls_shape = shape_list(cls_index)
            if len(cls_shape) <= len(hidden_shape) - 2:
                cls_index = tf.expand_dims(cls_index, axis=-1)
            # else:
            # cls_index = cls_index[..., tf.newaxis]
            # cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
            output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
            output = tf.squeeze(
                output, axis=len(hidden_shape) - 2
            )  # shape of output: (batch, num choices, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        if self.has_first_dropout:
            output = self.first_dropout(output, training=training)

        if self.has_summary:
            output = self.summary(output)

        if self.has_activation:
            output = self.activation(output)

        if self.has_last_dropout:
            output = self.last_dropout(output, training=training)

        return output


def shape_list(tensor: tf.Tensor) -> List[int]:
    """
    Deal with dynamic shape in tensorflow cleanly.

    Args:
        tensor (:obj:`tf.Tensor`): The tensor we want the shape of.

    Returns:
        :obj:`List[int]`: The shape of the tensor as a list.
    """
    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
    """
    Creates a :obj:`tf.initializers.TruncatedNormal` with the given range.

    Args:
        initializer_range (`float`, defaults to 0.02): Standard deviation of the truncated normal initializer.

    Returns:
        :obj:`tf.initializers.TruncatedNormal`: The truncated normal initializer.
    """
    return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)


class TFWrappedEmbeddings:
    """
    Wraps a :class:`TFSharedEmbeddings` layer in a plain (non-Keras-layer) Python class to avoid problems with
    weight restoring. It also makes sure the layer is called from the correct scope so that the correct weights are
    saved and restored.
    """

    def __init__(self, layer, abs_scope_name=None):
        self._layer = layer
        self._abs_scope_name = abs_scope_name

    def call(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer.call(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer.call(inputs, mode)

    def __call__(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer(inputs, mode)
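# A minimal usage sketch (illustrative only, not part of the library): it exercises
# TFSharedEmbeddings in both "embedding" and "linear" modes and shows shape_list()
# resolving the static dimensions, as described in the docstrings above. It assumes
# TensorFlow 2.x; the vocabulary and hidden sizes below are arbitrary.
if __name__ == "__main__":
    shared = TFSharedEmbeddings(vocab_size=100, hidden_size=16)
    ids = tf.constant([[1, 2, 3], [4, 5, 6]])  # int tensor, [batch=2, length=3]
    hidden = shared(ids, mode="embedding")  # float tensor, [2, 3, 16]
    logits = shared(hidden, mode="linear")  # logits over the vocab, [2, 3, 100]
    print(shape_list(hidden), shape_list(logits))  # [2, 3, 16] [2, 3, 100]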
import sys import logging from lusid.utilities import ApiClientFactory from lusidtools.cocoon import ( load_data_to_df_and_detect_delimiter, load_from_data_frame, parse_args, identify_cash_items, validate_mapping_file_structure, load_json_file, cocoon_printer, ) from lusidtools.logger import LusidLogger def load_transactions(args): file_type = "transactions" factory = ApiClientFactory(api_secrets_filename=args["secrets_file"]) if args["delimiter"]: logging.info(f"delimiter specified as {repr(args['delimiter'])}") logging.debug("Getting data") transactions = load_data_to_df_and_detect_delimiter(args) mappings = load_json_file(args["mapping"]) if "cash_flag" in mappings.keys(): identify_cash_items(transactions, mappings, file_type) validate_mapping_file_structure(mappings, transactions.columns, file_type) if args["dryrun"]: logging.info("--dryrun specified as True, exiting before upsert call is made") return 0 transactions_response = load_from_data_frame( api_factory=factory, data_frame=transactions, scope=args["scope"], identifier_mapping=mappings[file_type]["identifier_mapping"], mapping_required=mappings[file_type]["required"], mapping_optional=mappings[file_type]["optional"] if "optional" in mappings[file_type].keys() else {}, file_type=file_type, batch_size=args["batch_size"], property_columns=mappings[file_type]["property_columns"] if "property_columns" in mappings[file_type].keys() else [], ) # print_response(transactions_response, file_type) succ, errors = cocoon_printer.format_transactions_response(transactions_response) logging.info(f"number of successful upserts: {len(succ)}") logging.info(f"number of errors : {len(errors)}") if args["display_response_head"]: logging.info(succ.head(40)) logging.info(errors.head(40)) return transactions_response def main(): args, ap = parse_args(sys.argv[1:]) LusidLogger(args["debug"]) load_transactions(args) return 0 if __name__ == "__main__": main()
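# A hedged invocation sketch (not part of the tool): load_transactions() can be
# driven directly with a dict mirroring the keys this script reads. Only the keys
# referenced above are shown; load_data_to_df_and_detect_delimiter() may require
# more (e.g. the input file path), and every path and name below is a placeholder.
#
#     example_args = {
#         "secrets_file": "secrets.json",          # placeholder path
#         "mapping": "transaction_mapping.json",   # placeholder path
#         "delimiter": ",",
#         "scope": "example-scope",                # placeholder LUSID scope
#         "batch_size": 2000,
#         "dryrun": True,                          # validate only, no upsert call
#         "display_response_head": False,
#     }
#     load_transactions(example_args)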
import discord import git from discord.ext import commands from discord.ext.commands import CommandNotFound class Management(commands.Cog): """ Set of commands for Administration. """ def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_error(self, ctx, error): if isinstance(error, CommandNotFound): return raise error @commands.command(name='setcolor', no_pm=True) async def set_member_color(self, ctx, role: discord.Role, color: discord.Color): """ Color the nickname of the participant. * Let there be bright colors and colors! * [!] In development. Arguments: color in HEX For example: !setcolor #FF0000 """ try: await role.edit(color=color) if not role.is_default(): embed = discord.Embed(title=f"Changed the role color for {role.name} to {color}") await ctx.send(embed=embed) else: embed = discord.Embed(title="Peribot cannot affect the default roles.") await ctx.send(embed=embed) except discord.Forbidden: embed = discord.Embed(title="Peribot does not have permissions to change roles." ) await ctx.send(embed=embed) except discord.HTTPException: embed = discord.Embed(title=f"Peribot failed to update {role.name}'s color" ) await ctx.send(embed=embed) except discord.InvalidArgument: embed = discord.Embed(title=f"Invalid Arguments!", description="!setcolor @Role [Hex Code or Generic Name]") await ctx.send(embed=embed) except discord.ext.commands.errors.BadArgument: embed = discord.Embed(title=f"Invalid Arguments!", description="!setcolor @Role [Hex Code or Generic Name]") await ctx.send(embed=embed) @commands.command(name='nick', aliases=["setnick"]) @commands.cooldown(1, 21600, commands.BucketType.user) async def nick(self, ctx, user: discord.Member, *, nick): if ctx.author.id == 309089769663496194 or ctx.author.id == 204792579881959424: await user.edit(nick=nick, reason="Jeep made me do it") @commands.command(name='gitpull') async def git_pull(self, ctx): if ctx.author.id == 204792579881959424: git_dir = "./" try: g = git.cmd.Git(git_dir) g.pull() embed = discord.Embed(title=":white_check_mark: Successfully pulled from repository", color=0x00df00) await ctx.channel.send(embed=embed) except Exception as e: errno, strerror = e.args embed = discord.Embed(title="Command Error!", description=f"Git Pull Error: {errno} - {strerror}", color=0xff0007) await ctx.channel.send(embed=embed) else: await ctx.send("You don't have access to this command!") @commands.command(name='mute') @commands.has_permissions(manage_messages=True) async def mute(self, ctx, user: discord.User): pass @commands.command(name='pin') @commands.has_permissions(manage_messages=True) async def pin_message(self, ctx, *, message): """Copy your message in a stylish and modern frame, and then fix it! Arguments: `: message` - message __ __ For example: ``` !pin This text was written by the ancient Elves in the name of Discord! ``` """ embed = discord.Embed(color=0x71f442, title='Pin it up!', description=message) embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) embed.set_footer(text=f'{ctx.prefix}{ctx.command}') msg = await ctx.send(embed=embed) await ctx.message.delete() await msg.pin() # # @commands.command(name='resetmute', ) # @commands.has_permissions(manage_roles=True) # async def resetmute(self, ctx): # """Reset the settings of` !mute` and remove the role of PeriMute. * When peace times have arrived, without flooding! 
* # """ # # mute = discord.utils.get(ctx.guild.roles, name='PeriMute') # if not mute: # return await ctx.send('User isn\'t muted.') # # try: # await mute.delete() # # except discord.errors.Forbidden: # await ctx.message.add_reaction('❌') # # else: # await ctx.message.add_reaction('✅') # # # @commands.command(name='mute') # # @commands.has_permissions(manage_roles=True) # # async def mute(self, ctx, member: discord.Member, *, reason: str = 'отсутствует'): # # """Mute the member. * He will not be able to send messages, cool! * # #         Arguments: # #         `: member` - member # #         `: reason` - reason # #         __ __ # #         For example: # #         ``` # #         !mute @Username#1234 Spam # #         !mute @Username # #         ``` # #         For the team to work correctly, I need to make some edits to the text channels and roles on this server. # #         [!?] What changes will be made? # #         > PeriMute role will be created; # #         > The PeriMute role will be added to the access settings of all text feeds; # #         > All roles (except @everyone) will have the "send_messages" right (sending messages) removed; ``` # #         """ # # mute = discord.utils.get(ctx.guild.roles, name='NaomiMute') # # # # if not mute: # # try: # # def message_check(m): # # return m.author.id == ctx.author.id # # # # failed_channels = [] # # failed_roles = [] # # # # await ctx.send( # # f'The command {ctx.prefix} {ctx.command} was used for the first time on this server. \nCan I make changes to the channel and role settings for this command to work correctly? (Yes/No)', # # delete_after=120.0) # # msg = await self.bot.wait_for('message', check=message_check, timeout=120.0) # # # # if msg.content.lower() in ['yes', 'aha', 'yep']: # # counter_msg = await ctx.send( # # 'Ok, I’m working ... \nModification of channels: pending. \nModification of roles: pending.') # # # # mute = await ctx.guild.create_role(name='PeriMute', # # reason='The !mute command was used, but the "PeriMute" role was missing.') # # # # modified_channels = 0 # # modified_roles = 0 # # for tchannel in ctx.guild.text_channels: # # try: # # await tchannel.set_permissions(mute, # # send_messages=False, # # add_reactions=False) # # # # except: # # failed_channels.append(f'`{tchannel.name}`') # # # # else: # # modified_channels += 1 # # try: # # await counter_msg.edit( # # content=f'Хорошо, выполняю... \nМодификация каналов: {modified_channels}/{len(ctx.guild.text_channels)}\nМодификация ролей: в ожидании.') # # except: # # pass # # # # # mute_perms = discord.Permissions() # # # mute_perms.update(send_messages=False) # # # К черту discord.Permissions() # # # # mute_perms = discord.PermissionOverwrite() # # mute_perms.send_messages = False # # mute_perms.add_reactions = False # # # # for role in ctx.guild.roles: # # if role != ctx.guild.default_role: # # try: # # await role.edit(permissions=mute_perms) # # except: # # failed_roles.append(f'`{role.name}`') # # else: # # modified_roles += 1 # # await counter_msg.edit( # # content=f'Хорошо, выполняю... 
\nМодификация каналов: {modified_roles}/{len(ctx.guild.text_channels)}.\nМодификация ролей: {x1}/{len(ctx.guild.roles) - 1}') # # else: # # return await ctx.send(':x: Отменено.') # # except asyncio.TimeoutError: # # await ctx.send( # # 'Я не столь терпелива, чтобы ждать ответа так долго...\nПросто повторно введите команду.') # # try: # # if not len(failed_channels) == 0 or not len(failed_roles) == 0: # # await ctx.send( # # f'Модификация завершена не полностью:\n- Каналы: {", ".join(failed_channels)}\n- Роли: {", ".join(failed_roles)}') # # except: # # pass # # # # await member.add_roles(mute, reason='Был приглушен через n!mute.') # # # # embed = discord.Embed(timestamp=ctx.message.created_at, color=0x35FF81, # # description=f'Участник {member.mention} приглушен.\nПричина: {reason}') # # embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) # # embed.set_footer(text=f'{ctx.prefix}{ctx.command}') # # # # await ctx.send(embed=embed) # # # @commands.command(name='unmute') # # @commands.has_permissions(manage_roles=True) # # async def unmute(self, ctx, member: discord.Member, *, reason: str = 'отсутствует'): # # """Снять приглушение с участника. *Да будет свобода чата!* # # Аргументы: # # `:member` - участник # # `:reason` - причина # # __ __ # # Например: # # ``` # # n!unmute @Username#1234 # # n!unmute Username Просто так # # ``` # # """ # # # # mute = discord.utils.get(ctx.guild.roles, name='NaomiMute') # # # # if not mute: # # embed = discord.Embed(timestamp=ctx.message.created_at, color=0xff0000, # # description='Не найдена роль "NaomiMute", а раз ее нет, то и снимать мут мне не с кого...') # # embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) # # embed.set_footer(text=f'{ctx.prefix}{ctx.command}') # # # # elif mute not in member.roles: # # embed = discord.Embed(timestamp=ctx.message.created_at, color=0xff0000, # # description=f'{member.mention} не приглушен!') # # embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) # # embed.set_footer(text=f'{ctx.prefix}{ctx.command}') # # # # else: # # await member.remove_roles(mute, reason='Приглушение снято - n!unmute.') # # # # embed = discord.Embed(timestamp=ctx.message.created_at, color=0x35FF81, # # description=f'Снято приглушение с участника {member.mention}.\nПричина: {reason}') # # embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) # # embed.set_footer(text=f'{ctx.prefix}{ctx.command}') # # # # await ctx.send(embed=embed) # # # @commands.command(name='cleanup', ) # @commands.has_permissions(manage_messages=True) # async def cleanup(self, ctx, member: discord.Member, count: int): # # """Delete messages from a specific member. # Arguments: # `: member` - member # `: count` - number of messages # __ __ # For example: # ``` # !cleanup @ Username # 1234 5 # !cleanup Username 100 # ``` # """ # if count > 100: # await ctx.send(f'Число сообщений не должно превышать {count}.') # else: # def is_member(m): # return m.message.author == member # # await ctx.channel.purge(limit=count, check=is_member) # # @commands.command(name='ban', ) # @commands.has_permissions(ban_members=True) # async def ban(self, ctx, member: discord.Member, *, reason: str = 'N/A'): # # """Block a member on the server. * Yes, the banhammer will rise above <member>! 
* # Arguments: # `: member` - member # `: reason` - reason # __ __ # For example: # ``` # !ban Username You were a bad guy # !ban @ Username # 1234 # ``` # """ # await ctx.guild.ban(user=member, reason=reason) # # embed = discord.Embed(timestamp=ctx.message.created_at, color=0x00ff00, # description=f'Пользователь {member.mention} забанен!\nПричина: {reason}.') # embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) # embed.set_footer(text=f'{ctx.prefix}{ctx.command}') # # await ctx.send(embed=embed) # # # @commands.command(name='unban', aliases=['pardon']) # # @commands.has_permissions(ban_members=True) # # async def unban(self, ctx, user: discord.User, *, reason: str = 'N/A'): # # # # """Unblock a member on the server. # # Arguments: # # `: user` - user # # `: reason` - reason # # __ __ # # For example: # # ``` # # !unban @ Username # 1234 You're good # # ``` # # """ # # ban_entries = await ctx.guild.bans() # # banned_users = [user.user.name for user in ban_entries] # # # # for u in banned_users: # # if u.id == user.id: # # try: # # await ctx.guild.unban(user=u, reason=reason) # # except: # # stats = f'Не удалось разбанить {user}.' # # else: # # stats = f'Пользователь {u.mention} успешно разбанен.' # # # # embed = discord.Embed(timestamp=ctx.message.created_at, color=0xFF0000, # # description=stats) # # embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) # # embed.set_footer(text=f'{ctx.prefix}{ctx.command}') # # # # await ctx.send(embed=embed) # # @commands.command(name='banlist', aliases=['bans'], ) # @commands.has_permissions(ban_members=True) # async def banlist(self, ctx): # """ # List of banned members. # """ # # bans = await ctx.guild.bans() # # if len(bans) <= 0: # embed = discord.Embed(timestamp=ctx.message.created_at, # color=0xff0000, # description='No banned users.') # else: # embed = discord.Embed(timestamp=ctx.message.created_at, # color=0xff0000, # description=f'Banned users:\n{", ".join([user.user.name for user in bans])}') # embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) # embed.set_footer(text=f'{ctx.prefix}{ctx.command}') # # await ctx.send(embed=embed) # @commands.command(name='kick') @commands.has_permissions(kick_members=True) async def kick(self, ctx, member: discord.Member, *, reason: str = 'N/A'): """ `:member` - The person you are kicking `:reason` - Reason for kick """ try: await member.kick(reason=reason) except Exception as e: await ctx.send("error") return embed = discord.Embed(timestamp=ctx.message.created_at, color=0x00ff00, description=f'User {member.name} was kicked.\nReason: {reason}.') embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) embed.set_footer(text=f'{ctx.prefix}{ctx.command}') await ctx.send(embed=embed) @commands.command(name='ban') @commands.has_permissions(ban_members=True) async def ban(self, ctx, member: discord.Member, *, reason: str = 'N/A', delete: int = 0): """ `:member` - The person you are banning @ them `:reason` - Reason for kick """ try: await member.ban(reason=reason, delete_message_days=delete) except discord.Forbidden: embed = discord.Embed(title="Command Error!", description=f"I do not have permissions to do that", color=discord.Color.red()) await ctx.send(embed=embed) return except discord.HTTPException: embed = discord.Embed(title="Command Error!", description=f"Banning failed. 
Try again", color=discord.Color.red()) await ctx.send(embed=embed) return embed = discord.Embed(timestamp=ctx.message.created_at, color=0x00ff00, description=f'User {member.name} was banned.\nReason: {reason}.\nMessages Deleted: {delete} days') embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) embed.set_footer(text=f'{ctx.prefix}{ctx.command}') await ctx.send(embed=embed) @commands.command(name='unban') @commands.has_permissions(ban_members=True) async def unban(self, ctx, member: int, *, reason: str = 'N/A'): """ `:member` - The person you are unbanning (their ID) `:reason` - Reason for kick """ for banentry in await ctx.guild.bans(): if member == banentry.user.id: try: await ctx.guild.unban(banentry.user, reason=reason) except discord.Forbidden: embed = discord.Embed(title="Command Error!", description=f"I do not have permissions to do that", color=discord.Color.red()) await ctx.send(embed=embed) return except discord.HTTPException: embed = discord.Embed(title="Command Error!", description=f"Unbanning failed. Try again", color=discord.Color.red()) await ctx.send(embed=embed) return embed = discord.Embed(timestamp=ctx.message.created_at, color=0x00ff00, description=f'User {banentry.user.name} was unbanned.\nReason: {reason}.') embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) embed.set_footer(text=f'{ctx.prefix}{ctx.command}') await ctx.send(embed=embed) def setup(bot): bot.add_cog(Management(bot))
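# A minimal loading sketch (an assumption, matching the pre-2.0 discord.py API
# implied by the synchronous setup() and ctx.author.avatar_url usage above).
# The prefix and token are placeholders.
if __name__ == "__main__":
    bot = commands.Bot(command_prefix="!")
    setup(bot)  # registers the Management cog on the bot
    # bot.run("YOUR_TOKEN_HERE")  # placeholder token; uncomment to run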
import abc # import asyncio import shared.utilities.util as util # import json import importlib # import sys from daq.daq import DAQ from daq.interface.ifdevice import IFDevice, DummyIFDevice, IFDeviceFactory from daq.manager.manager import Managers class InterfaceFactory(): @staticmethod def create(config, ui_config=None, **kwargs): print(config) create_cfg = config['INTERFACE'] ifconfig = config['IFCONFIG'] # inst_config = config['instrument'] # print("module: " + create_cfg['MODULE']) # print("class: " + create_cfg['CLASS']) try: # print('Creating: ' + config['name']) # print(' ClassName: ' + config['class']) mod_ = importlib.import_module(create_cfg['MODULE']) # print(f'mod: {mod_}') cls_ = getattr(mod_, create_cfg['CLASS']) # print(f'cls: {cls_}') # inst_class = eval(config['class']) # return inst_class.factory_create() test = cls_(ifconfig, ui_config=ui_config, **kwargs) # print(test) return test except Exception as e: print(f"Interface: Unexpected error: {e}") raise class Interface(DAQ): class_type = 'INTERFACE' def __init__(self, config, ui_config=None, **kwargs): super(Interface, self).__init__(config, ui_config=ui_config, **kwargs) print('Interface init') self.name = 'Interface' self.kwargs = kwargs # print(config) # self.config = config # if no NAME given, create one with address # self.label = self.config['LABEL'] # Message buffers (Queues) # self.msg_send_buffer = None # self.msg_rcv_buffer = None # self.ifdev_send_buffer = None # self.ifdev_rcv_buffer = None # self.gui_send_buffer = None # # self.create_msg_buffer() # # self.read_q = config['IF_READ_Q'] # # self.write_q = config['IF_WRITE_Q'] # self.task_list = [] self.ifdevice_manager = Managers().get('IFDeviceManager') print(f'device_manager: {self.ifdevice_manager}') self.ifdevice = None # print(self.dev_mananger) def setup(self): self.do_ui_connection = False super().setup() self.add_ifdevice() @abc.abstractmethod def add_ifdevice(self): pass # print('Add ifdevice') # # print(f'config = {self.config['IFACE_LIST']}') # # for k, ifcfg in self.config['IFACE_LIST'].items(): # # # self.iface_map[iface.name] = iface # # # print(ifcfg['IFACE_CONFIG']) # # # print(ifcfg['INTERFACE']) # # # iface = InterfaceFactory().create(ifcfg['IFACE_CONFIG']) # # iface = InterfaceFactory().create(ifcfg) # # print(f'iface: {iface}') # # # iface.msg_buffer = self.iface_rcv_buffer # # iface.msg_send_buffer = self.from_child_buf # # self.iface_map[iface.get_id()] = iface # # TODO: remove hardcode and use config # ifdev_config = json.loads( # '{"IFDEVICE": {"MODULE": "daq.interface.ifdevice", "CLASS": "DummyIFDevice"}, "IFDEVCONFIG": {"DESCRIPTION": {"LABEL": "Dummy IFDevice", "SERIAL_NUMBER": "1234", "PROPERTY_NUMBER": "CD0001234"}}}') # ui_config = dict() # # ui_config['do_ui_connection'] = False # self.ifdevice = self.ifdevice_manager.create( # 'DummyIFDevice', ifdev_config, ui_config=ui_config) # self.ifdevice.to_parent_buf = self.from_child_buf def get_ui_address(self): print(self.label) address = 'envdaq/interface/'+self.label+'/' print(f'get_ui_address: {address}') return address # def connect(self, cmd=None): # pass def enable(self): print('Enabling Interface') super().enable() self.ifdevice.register_parent( self.get_id(), to_parent_buffer=self.from_child_buf ) def disable(self): print("interface disable dereg parent") self.ifdevice.deregister_parent(self.get_id()) print("interface disable super") super().disable() def start(self, cmd=None): print('Starting Interface') super().start(cmd) # task = asyncio.ensure_future(self.read_loop()) # 
self.task_list.append(task) # if self.ifdevice is not None: # self.ifdevice.start() # Changed to allow multiple interface instances # for a given device. Device will run as long # as there are interfaces registered # self.ifdevice.register_parent( # self.get_id(), # to_parent_buffer=self.from_child_buf # ) # def read(self, cmd=None): # pass # def write(self, cmd): # pass # TODO: howto clean up managers? def stop(self, cmd=None): # for instrument in self.inst_map: # # print(sensor) # instrument.stop() # tasks = asyncio.Task.all_tasks() # for t in self.task_list: # # print(t) # t.cancel() # if self.ifdevice is not None: # self.ifdevice.stop() # self.ifdevice.deregister_parent(self.get_id()) super().stop(cmd) async def shutdown(self): pass # for instrument in self.inst_map: # # print(sensor) # instrument.stop() # tasks = asyncio.Task.all_tasks() # for t in self.task_list: # # print(t) # t.cancel() # if self.ifdevice is not None: # self.ifdevice.deregister_parent(self.get_id()) # self.ifdevice.shutdown() # while self.status2.get_registration_status() != Status.UNREGISTERING: # TODO use if we start connecting to UI # await super().shutdown() # def disconnect(self, cmd=None): # pass # async def read_loop(self): # print('*****Interface: starting read_loop') # while True: # print(self.ifdev_rcv_buffer) # msg = await self.ifdev_rcv_buffer.get() # print(f'*****iface.read_loop: {msg.to_json()}') # await self.handle(msg) # # await asyncio.sleep(.1) @abc.abstractmethod async def handle(self, msg): pass def get_signature(self): # This will combine instrument metadata to generate # a unique # ID return self.name+":"+self.label # def create_msg_buffers(self, config=None): # # self.read_buffer = MessageBuffer(config=config) # self.ifdev_send_buffer = asyncio.Queue(loop=self.loop) # self.ifdev_rcv_buffer = asyncio.Queue(loop=self.loop) def get_id(self): id = self.__class__.__name__ if self.label is not None: id += ":"+self.label return id # class Interface(abc.ABC): # class_type = 'INTERFACE' # def __init__(self, config): # print('Interface init') # self.loop = asyncio.get_event_loop() # print(config) # self.config = config # # if no NAME given, create one with address # self.label = self.config['LABEL'] # # Message buffers (Queues) # self.msg_send_buffer = None # self.msg_rcv_buffer = None # self.ifdev_send_buffer = None # self.ifdev_rcv_buffer = None # self.gui_send_buffer = None # # self.create_msg_buffer() # # self.read_q = config['IF_READ_Q'] # # self.write_q = config['IF_WRITE_Q'] # self.task_list = [] # self.dev_manager = Managers().get('IFDeviceManager') # print(f'dev_manager: {self.dev_manager}') # self.ifdevice = None # # print(self.dev_mananger) # def connect(self, cmd=None): # pass # def start(self, cmd=None): # print('Starting Interface') # task = asyncio.ensure_future(self.read_loop()) # self.task_list.append(task) # if self.ifdevice is not None: # self.ifdevice.start() # def read(self, cmd=None): # pass # def write(self, cmd): # pass # # TODO: howto clean up managers? 
# def stop(self, cmd=None): # # for instrument in self.inst_map: # # # print(sensor) # # instrument.stop() # # tasks = asyncio.Task.all_tasks() # for t in self.task_list: # # print(t) # t.cancel() # def disconnect(self, cmd=None): # pass # async def read_loop(self): # print('*****Interface: starting read_loop') # while True: # print(self.ifdev_rcv_buffer) # msg = await self.ifdev_rcv_buffer.get() # print(f'*****iface.read_loop: {msg.to_json()}') # await self.handle(msg) # # await asyncio.sleep(.1) # @abc.abstractmethod # async def handle(self, msg): # pass # def create_msg_buffers(self, config=None): # # self.read_buffer = MessageBuffer(config=config) # self.ifdev_send_buffer = asyncio.Queue(loop=self.loop) # self.ifdev_rcv_buffer = asyncio.Queue(loop=self.loop) # def get_id(self): # id = self.__class__.__name__ # if self.label is not None: # id += ":"+self.label # return id class DummyInterface(Interface): class_type = 'DUMMY_INTERFACE' def __init__(self, config, ui_config=None, **kwargs): super(DummyInterface, self).__init__(config, ui_config=ui_config, **kwargs) # ifdev_config = json.loads('{"IFDEVICE": {"MODULE": "daq.interface.ifdevice", "CLASS": "DummyIFDevice"}, "IFDEVCONFIG": {"DESCRIPTION": {"LABEL": "Dummy IFDevice", "SERIAL_NUMBER": "1234", "PROPERTY_NUMBER": "CD0001234"}}}') # ui_config = dict() # ui_config['do_ui_connection'] = False # # self.dev_mananger # print('DummyInterface init') # # self.ifdevice = self.dev_mananger.create('DummyIFDevice', config) # # print(self.dev_manager) # self.ifdevice = self.ifdevice_manager.create('DummyIFDevice', ifdev_config, ui_config=ui_config) # # self.idevice = IFDeviceFactory().create(ifdev_config, ui_config=None) # # self.idevice = DummyIFDevice(ifdev_config, ui_config=None) # # print(f'ifdevice: {self.ifdevice}') # self.create_msg_buffers() # # in order to make sense, child:send == parent:rcv # # self.ifdevice.msg_send_buffer = self.ifdev_rcv_buffer # # if self.ifdevice is not None: # self.ifdevice.to_parent_buf = self.ifdev_rcv_buffer # print(self.ifdevice.to_parent_buf) self.name = 'DummyInterface' # self.host = "localhost" # self.port = 1 try: self.uri = config["URI"] parts = self.uri.split(":") self.host = parts[0] self.port = parts[1] except KeyError: self.host = "localhost" self.port = 1 # if 'dummy_port' in config: # self.port = config['dummy_port'] self.setup() def setup(self): super().setup() def add_ifdevice(self): print('Add ifdevice') # print(f'config = {self.config['IFACE_LIST']}') # for k, ifcfg in self.config['IFACE_LIST'].items(): # # self.iface_map[iface.name] = iface # # print(ifcfg['IFACE_CONFIG']) # # print(ifcfg['INTERFACE']) # # iface = InterfaceFactory().create(ifcfg['IFACE_CONFIG']) # iface = InterfaceFactory().create(ifcfg) # print(f'iface: {iface}') # # iface.msg_buffer = self.iface_rcv_buffer # iface.msg_send_buffer = self.from_child_buf # self.iface_map[iface.get_id()] = iface # TODO: remove hardcode and use config # ifdev_config = json.loads( # '{"IFDEVICE": {"MODULE": "daq.interface.ifdevice", "CLASS": "DummyIFDevice"},' # ' "IFDEVCONFIG": {"DESCRIPTION": {"LABEL": "Dummy IFDevice", "SERIAL_NUMBER": "1234", "PROPERTY_NUMBER": "CD0001234"}}}' # ) ifdev_config = { "IFDEVICE": { "MODULE": "daq.interface.ifdevice", "CLASS": "DummyIFDevice" }, "IFDEVCONFIG": { "DESCRIPTION": { "LABEL": self.label, "HOST": self.host, "PORT": self.port, } } } ui_config = dict() # ui_config['do_ui_connection'] = False self.ifdevice = self.ifdevice_manager.create( 'DummyIFDevice', ifdev_config, ui_config=self.ui_config, **self.kwargs 
) # self.ifdevice.to_parent_buf = self.from_child_buf def start(self, cmd=None): print('Starting Interface') super().start(cmd) if self.ifdevice is not None: self.ifdevice.start() # TODO: howto clean up managers? def stop(self, cmd=None): if self.ifdevice is not None: self.ifdevice.stop() super().stop(cmd) async def handle(self, msg, type=None): # interface will know if msg is json or object # check header to see if data to be sent to instrument # - if yes, add timestamp # print('type: {}'.format(msg.type)) if (type == 'FromChild' and msg.type == IFDevice.class_type): msg.type = Interface.class_type msg.sender_id = self.get_id() if (msg.subject == 'DATA'): # update could be done in base class msg.update(msgtype=Interface.class_type) msg.body['DATETIME'] = util.dt_to_string() # print(f'^^^^ DummyInterface: {msg.to_json()}') # self.msg_buffer.put_nowait(msg) # await self.msg_send_buffer.put(msg) await self.message_to_parent(msg) elif type == 'FromParent': # print(f'message{msg.subject}, {msg.body}') pass else: print(f'Unknown Message type: {msg.type}, {msg.to_json()}') pass def get_definition_instance(self): return DummyInterface.get_definition() def get_definition(): pass class SerialPortInterface(Interface): class_type = 'SERIALPORT_INTERFACE' def __init__( self, config, ui_config=None, **kwargs ): super(SerialPortInterface, self).__init__( config, ui_config=ui_config, **kwargs ) self.name = 'SerialPortInterface' self.label = config['LABEL'] # self.address = config['ADDRESS'] self.uri = config["URI"] parts = self.uri.split(":") if len(parts) > 1: self.host = parts[0] self.address = parts[1] else: self.host = "localhost" self.address = parts[0] self.baudrate = 9600 if 'baudrate' in config: self.baudrate = config['baudrate'] self.bytesize = 8 if 'bytesize' in config: self.bytesize = config['bytesize'] self.parity = 'N' if 'parity' in config: self.parity = config['parity'] self.stopbits = 1 if 'stopbits' in config: self.stopbits = config['stopbits'] self.xonxoff = 0 if 'xonxoff' in config: self.xonxoff = config['xonxoff'] self.rtscts = 0 if 'rtscts' in config: self.rtscts = config['rtscts'] self.setup() def setup(self): super().setup() def add_ifdevice(self): print('Add ifdevice') # TODO: remove hardcode and use config # TODO: add baud rate, etc from config ifdev_config = { "IFDEVICE": { "MODULE": "daq.interface.ifdevice", "CLASS": "SerialPortIFDevice" }, "IFDEVCONFIG": { "DESCRIPTION": { "LABEL": self.label, "DEVPATH": self.address, "baudrate": self.baudrate, "bytesize": self.bytesize, "parity": self.parity, "stopbits": self.stopbits, "xonxoff": self.xonxoff, "rtscts": self.rtscts, } } } # ifdev_config = json.loads( # '{"IFDEVICE": {"MODULE": "daq.interface.ifdevice", "CLASS": "SerialPortIFDevice"},' # ' "IFDEVCONFIG": {"DESCRIPTION": {"LABEL": "SerialPort IFDevice", "DEVPATH": "/dev/ttyUSB0"}}}' # ) # ui_config = dict() # ui_config['do_ui_connection'] = False self.ifdevice = self.ifdevice_manager.create( 'SerialPortIFDevice', ifdev_config, ui_config=self.ui_config, **self.kwargs ) # print(f'{self.kwargs}') # self.ifdevice.register_parent( # self.get_id(), # to_parent_buffer=self.from_child_buf # ) # self.ifdevice.to_parent_buf = self.from_child_buf async def handle(self, msg, type=None): # interface will know if msg is json or object # check header to see if data to be sent to instrument # - if yes, add timestamp # print('type: {}'.format(msg.type)) # print(f'SerialPort.handle: {msg.to_json()}') if (type == 'FromChild' and msg.type == IFDevice.class_type): msg.type = Interface.class_type 
msg.sender_id = self.get_id() if (msg.subject == 'DATA'): # update could be done in base class msg.update(msgtype=Interface.class_type) msg.body['DATETIME'] = util.dt_to_string() # print(f'Serial: {msg.to_json()}') # self.msg_buffer.put_nowait(msg) # await self.msg_send_buffer.put(msg) await self.message_to_parent(msg) elif type == 'FromParent': if msg.subject == 'SEND': await self.ifdevice.message_from_parent(msg) # print(f'55555message:{msg.subject}, {msg.body}') else: print(f'Unknown Message type: {msg.type}, {msg.to_json()}') def get_definition_instance(self): return DummyInterface.get_definition() def get_definition(): pass class TCPPortInterface(Interface): class_type = 'TCPPORT_INTERFACE' def __init__( self, config, ui_config=None, **kwargs ): super(TCPPortInterface, self).__init__( config, ui_config=ui_config, **kwargs ) self.name = 'TCPPortInterface' self.label = config['LABEL'] self.uri = config["URI"] parts = self.uri.split(":") self.host = parts[0] self.port = parts[1] # self.host = 'localhost' # if 'HOST' in config: # self.host = config['HOST'] # self.port = 4001 # if 'PORT' in config: # self.port = config['PORT'] self.address = (self.host, self.port) self.setup() def setup(self): super().setup() def add_ifdevice(self): print('Add ifdevice') # print(f'config = {self.config['IFACE_LIST']}') # for k, ifcfg in self.config['IFACE_LIST'].items(): # # self.iface_map[iface.name] = iface # # print(ifcfg['IFACE_CONFIG']) # # print(ifcfg['INTERFACE']) # # iface = InterfaceFactory().create(ifcfg['IFACE_CONFIG']) # iface = InterfaceFactory().create(ifcfg) # print(f'iface: {iface}') # # iface.msg_buffer = self.iface_rcv_buffer # iface.msg_send_buffer = self.from_child_buf # self.iface_map[iface.get_id()] = iface # TODO: remove hardcode and use config # TODO: add baud rate, etc from config ifdev_config = { "IFDEVICE": { "MODULE": "daq.interface.ifdevice", "CLASS": "TCPPortIFDevice" }, "IFDEVCONFIG": { "DESCRIPTION": { "LABEL": self.label, "ADDRESS": self.address } } } # ifdev_config = json.loads( # '{"IFDEVICE": {"MODULE": "daq.interface.ifdevice", "CLASS": "SerialPortIFDevice"},' # ' "IFDEVCONFIG": {"DESCRIPTION": {"LABEL": "SerialPort IFDevice", "DEVPATH": "/dev/ttyUSB0"}}}' # ) ui_config = dict() # ui_config['do_ui_connection'] = False self.ifdevice = self.ifdevice_manager.create( 'TCPPortIFDevice', ifdev_config, ui_config=self.ui_config, **self.kwargs ) # self.ifdevice.register_parent( # self.get_id(), # to_parent_buffer=self.from_child_buf # ) # self.ifdevice.to_parent_buf = self.from_child_buf async def handle(self, msg, type=None): # interface will know if msg is json or object # check header to see if data to be sent to instrument # - if yes, add timestamp # print('type: {}'.format(msg.type)) if (type == 'FromChild' and msg.type == IFDevice.class_type): msg.type = Interface.class_type msg.sender_id = self.get_id() if (msg.subject == 'DATA'): # update could be done in base class msg.update(msgtype=Interface.class_type) msg.body['DATETIME'] = util.dt_to_string() # print(f'TCP: {msg.to_json()}') # self.msg_buffer.put_nowait(msg) # await self.msg_send_buffer.put(msg) # print(f'tcpif to parent: {msg.body}') await self.message_to_parent(msg) elif type == 'FromParent': # print(f'message{msg.subject}, {msg.body}') # print(f'tcpif from parent: {msg}') await self.ifdevice.message_from_parent(msg) else: print(f'Unknown Message type: {msg.type}, {msg.to_json()}') def get_definition_instance(self): return DummyInterface.get_definition() def get_definition(): pass # class 
class LabJackT7Interface(Interface):

    class_type = 'LABJACKT7_INTERFACE'

    def __init__(self, config, ui_config=None, **kwargs):
        super(LabJackT7Interface, self).__init__(
            config, ui_config=ui_config, **kwargs
        )

        self.name = 'LabJackT7Interface'
        self.label = config['LABEL']
        self.address = config['ADDRESS']

        self.device_type = 'T7'
        self.connection_type = config.get('connection_type', 'ANY')
        # a serial number, if given, takes precedence as the identifier
        self.identifier = 'ANY'
        if 'identifier' in config:
            self.identifier = config['identifier']
        if 'serial_number' in config:
            self.identifier = config['serial_number']

        self.setup()

    def setup(self):
        super().setup()

    def add_ifdevice(self):
        print('Add ifdevice')
        # TODO: remove hardcode and use config
        ifdev_config = {
            "IFDEVICE": {
                "MODULE": "daq.interface.ifdevice",
                "CLASS": "LabJackT7Device"
            },
            "IFDEVCONFIG": {
                "DESCRIPTION": {
                    "LABEL": self.label,
                    "DEVPATH": self.address,
                    'device_type': self.device_type,
                    'connection_type': self.connection_type,
                    'identifier': self.identifier,
                }
            }
        }
        self.ifdevice = self.ifdevice_manager.create(
            'LabJackT7Device',
            ifdev_config,
            ui_config=self.ui_config,
            **self.kwargs
        )

    async def handle(self, msg, type=None):
        # Same pattern as the serial interface: timestamp and forward data
        # from the IFDevice; pass SEND requests down to the IFDevice.
        if type == 'FromChild' and msg.type == IFDevice.class_type:
            msg.type = Interface.class_type
            msg.sender_id = self.get_id()
            if msg.subject == 'DATA':
                # update could be done in base class
                msg.update(msgtype=Interface.class_type)
                msg.body['DATETIME'] = util.dt_to_string()
            await self.message_to_parent(msg)
        elif type == 'FromParent':
            if msg.subject == 'SEND':
                await self.ifdevice.message_from_parent(msg)
        else:
            print(f'Unknown Message type: {msg.type}, {msg.to_json()}')

    def get_definition_instance(self):
        return LabJackT7Interface.get_definition()

    @staticmethod
    def get_definition():
        pass


if __name__ == "__main__":

    config = {
        'INTERFACE': {
            'MODULE': 'daq.interface',
            'CLASS': 'DummyInterface',
        },
        'IFCONFIG': {
            'ADDRESS': 'DummyAddress',
            'SerialNumber': '1234'
        }
    }
    iface = InterfaceFactory()
    iface.create(config)
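# Illustrative sketch (not part of the original module): the same factory
# pattern applied to the serial interface above. The label, URI, and baud
# rate below are hypothetical example values, not settings from this repo.
def _example_serial_interface():
    serial_config = {
        'INTERFACE': {
            'MODULE': 'daq.interface',
            'CLASS': 'SerialPortInterface',
        },
        'IFCONFIG': {
            'LABEL': 'serial0',      # hypothetical label
            'URI': '/dev/ttyUSB0',   # bare devpath, so host defaults to localhost
            'baudrate': 19200,
        },
    }
    return InterfaceFactory.create(serial_config)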
# pyright: reportPropertyTypeMismatch=false from __future__ import annotations import collections from datetime import timedelta import functools import gc import json import operator import pickle import re from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Literal, Mapping, Sequence, Type, cast, final, overload, ) import warnings import weakref import numpy as np from pandas._config import config from pandas._libs import lib from pandas._libs.tslibs import ( Period, Tick, Timestamp, to_offset, ) from pandas._typing import ( ArrayLike, Axis, CompressionOptions, Dtype, DtypeArg, DtypeObj, FilePath, IndexKeyFunc, IndexLabel, IntervalClosedType, JSONSerializable, Level, Manager, NDFrameT, RandomState, Renamer, StorageOptions, T, TimedeltaConvertibleTypes, TimestampConvertibleTypes, ValueKeyFunc, WriteBuffer, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, InvalidIndexError, ) from pandas.util._decorators import ( deprecate_kwarg, doc, rewrite_axis_style_signature, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_ascending, validate_bool_kwarg, validate_fillna_kwargs, validate_inclusive, ) from pandas.core.dtypes.common import ( ensure_object, ensure_platform_int, ensure_str, is_bool, is_bool_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype, is_dict_like, is_dtype_equal, is_extension_array_dtype, is_float, is_list_like, is_number, is_numeric_dtype, is_re_compilable, is_scalar, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.inference import ( is_hashable, is_nested_list_like, ) from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core import ( algorithms as algos, arraylike, common as com, indexing, missing, nanops, sample, ) from pandas.core.array_algos.replace import should_use_regex from pandas.core.arrays import ExtensionArray from pandas.core.base import PandasObject from pandas.core.construction import ( create_series_with_explicit_dtype, extract_array, ) from pandas.core.describe import describe_ndframe from pandas.core.flags import Flags from pandas.core.indexes.api import ( DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, default_index, ensure_index, ) from pandas.core.internals import ( ArrayManager, BlockManager, SingleArrayManager, ) from pandas.core.internals.construction import mgr_to_mgr from pandas.core.missing import find_valid_index from pandas.core.ops import align_method_FRAME from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.core.sorting import get_indexer_indexer from pandas.core.window import ( Expanding, ExponentialMovingWindow, Rolling, Window, ) from pandas.io.formats import format as fmt from pandas.io.formats.format import ( DataFrameFormatter, DataFrameRenderer, ) from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from pandas._libs.tslibs import BaseOffset from pandas.core.frame import DataFrame from pandas.core.indexers.objects import BaseIndexer from pandas.core.resample import Resampler from pandas.core.series import Series # goal is to be able to define the docs close to function, while still being # able to share _shared_docs = {**_shared_docs} _shared_doc_kwargs = { "axes": "keywords for axes", "klass": "Series/DataFrame", "axes_single_arg": "int or labels for object", "args_transpose": "axes to 
permute (int or label for object)", "inplace": """ inplace : bool, default False If True, performs operation inplace and returns None.""", "optional_by": """ by : str or list of str Name or list of names to sort by""", "replace_iloc": """ This differs from updating with ``.loc`` or ``.iloc``, which require you to specify a location to update with some value.""", } bool_t = bool # Need alias because NDFrame has def bool: class NDFrame(PandasObject, indexing.IndexingMixin): """ N-dimensional analogue of DataFrame. Store multi-dimensional in a size-mutable, labeled data structure Parameters ---------- data : BlockManager axes : list copy : bool, default False """ _internal_names: list[str] = [ "_mgr", "_cacher", "_item_cache", "_cache", "_is_copy", "_subtyp", "_name", "_default_kind", "_default_fill_value", "_metadata", "__array_struct__", "__array_interface__", "_flags", ] _internal_names_set: set[str] = set(_internal_names) _accessors: set[str] = set() _hidden_attrs: frozenset[str] = frozenset( ["_AXIS_NAMES", "_AXIS_NUMBERS", "get_values", "tshift"] ) _metadata: list[str] = [] _is_copy: weakref.ReferenceType[NDFrame] | None = None _mgr: Manager _attrs: dict[Hashable, Any] _typ: str # ---------------------------------------------------------------------- # Constructors def __init__( self, data: Manager, copy: bool_t = False, attrs: Mapping[Hashable, Any] | None = None, ): # copy kwarg is retained for mypy compat, is not used object.__setattr__(self, "_is_copy", None) object.__setattr__(self, "_mgr", data) object.__setattr__(self, "_item_cache", {}) if attrs is None: attrs = {} else: attrs = dict(attrs) object.__setattr__(self, "_attrs", attrs) object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) @classmethod def _init_mgr( cls, mgr: Manager, axes, dtype: Dtype | None = None, copy: bool_t = False, ) -> Manager: """passed a manager and a axes dict""" for a, axe in axes.items(): if axe is not None: axe = ensure_index(axe) bm_axis = cls._get_block_manager_axis(a) mgr = mgr.reindex_axis(axe, axis=bm_axis) # make a copy if explicitly requested if copy: mgr = mgr.copy() if dtype is not None: # avoid further copies if we can if ( isinstance(mgr, BlockManager) and len(mgr.blocks) == 1 and is_dtype_equal(mgr.blocks[0].values.dtype, dtype) ): pass else: mgr = mgr.astype(dtype=dtype) return mgr def _as_manager(self: NDFrameT, typ: str, copy: bool_t = True) -> NDFrameT: """ Private helper function to create a DataFrame with specific manager. Parameters ---------- typ : {"block", "array"} copy : bool, default True Only controls whether the conversion from Block->ArrayManager copies the 1D arrays (to ensure proper/contiguous memory layout). Returns ------- DataFrame New DataFrame using specified manager type. Is not guaranteed to be a copy or not. """ new_mgr: Manager new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy) # fastpath of passing a manager doesn't check the option/manager class return self._constructor(new_mgr).__finalize__(self) # ---------------------------------------------------------------------- # attrs and flags @property def attrs(self) -> dict[Hashable, Any]: """ Dictionary of global attributes of this dataset. .. warning:: attrs is experimental and may change without warning. See Also -------- DataFrame.flags : Global flags applying to this object. 
""" if self._attrs is None: self._attrs = {} return self._attrs @attrs.setter def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) @final @property def flags(self) -> Flags: """ Get the properties associated with this pandas object. The available flags are * :attr:`Flags.allows_duplicate_labels` See Also -------- Flags : Flags that apply to pandas objects. DataFrame.attrs : Global metadata applying to this dataset. Notes ----- "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags <Flags(allows_duplicate_labels=True)> Flags can be get or set using ``.`` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags["allows_duplicate_labels"] False >>> df.flags["allows_duplicate_labels"] = True """ return self._flags @final def set_flags( self: NDFrameT, *, copy: bool_t = False, allows_duplicate_labels: bool_t | None = None, ) -> NDFrameT: """ Return a new object with updated flags. Parameters ---------- allows_duplicate_labels : bool, optional Whether the returned object allows duplicate labels. Returns ------- Series or DataFrame The same type as the caller. See Also -------- DataFrame.attrs : Global metadata applying to this dataset. DataFrame.flags : Global flags applying to this object. Notes ----- This method returns a new object that's a view on the same data as the input. Mutating the input or the output values will be reflected in the other. This method is intended to be used in method chains. "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags.allows_duplicate_labels True >>> df2 = df.set_flags(allows_duplicate_labels=False) >>> df2.flags.allows_duplicate_labels False """ df = self.copy(deep=copy) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels return df @final @classmethod def _validate_dtype(cls, dtype) -> DtypeObj | None: """validate the passed dtype""" if dtype is not None: dtype = pandas_dtype(dtype) # a compound dtype if dtype.kind == "V": raise NotImplementedError( "compound dtypes are not implemented " f"in the {cls.__name__} constructor" ) return dtype # ---------------------------------------------------------------------- # Construction @property def _constructor(self: NDFrameT) -> Callable[..., NDFrameT]: """ Used when a manipulation result has the same dimensions as the original. """ raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Internals @final @property def _data(self): # GH#33054 retained because some downstream packages uses this, # e.g. fastparquet return self._mgr # ---------------------------------------------------------------------- # Axis _stat_axis_number = 0 _stat_axis_name = "index" _AXIS_ORDERS: list[str] _AXIS_TO_AXIS_NUMBER: dict[Axis, int] = {0: 0, "index": 0, "rows": 0} _info_axis_number: int _info_axis_name: str _AXIS_LEN: int @property def _AXIS_NUMBERS(self) -> dict[str, int]: """.. 
deprecated:: 1.1.0"""
        warnings.warn(
            "_AXIS_NUMBERS has been deprecated.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return {"index": 0}

    @property
    def _AXIS_NAMES(self) -> dict[int, str]:
        """.. deprecated:: 1.1.0"""
        level = self.ndim + 1
        warnings.warn(
            "_AXIS_NAMES has been deprecated.", FutureWarning, stacklevel=level
        )
        return {0: "index"}

    @final
    def _construct_axes_dict(self, axes=None, **kwargs):
        """Return an axes dictionary for myself."""
        d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
        d.update(kwargs)
        return d

    @final
    @classmethod
    def _construct_axes_from_arguments(
        cls, args, kwargs, require_all: bool_t = False, sentinel=None
    ):
        """
        Construct and return axes if supplied in args/kwargs.

        If require_all, raise if all axis arguments are not supplied;
        return a tuple of (axes, kwargs).

        sentinel specifies the default parameter when an axis is not
        supplied; useful to distinguish when a user explicitly passes None
        in scenarios where None has special meaning.
        """
        # construct the args
        args = list(args)
        for a in cls._AXIS_ORDERS:

            # look for an argument by position
            if a not in kwargs:
                try:
                    kwargs[a] = args.pop(0)
                except IndexError as err:
                    if require_all:
                        raise TypeError(
                            "not enough/duplicate arguments specified!"
                        ) from err

        axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
        return axes, kwargs

    @final
    @classmethod
    def _get_axis_number(cls, axis: Axis) -> int:
        try:
            return cls._AXIS_TO_AXIS_NUMBER[axis]
        except KeyError:
            raise ValueError(f"No axis named {axis} for object type {cls.__name__}")

    @final
    @classmethod
    def _get_axis_name(cls, axis: Axis) -> str:
        axis_number = cls._get_axis_number(axis)
        return cls._AXIS_ORDERS[axis_number]

    @final
    def _get_axis(self, axis: Axis) -> Index:
        axis_number = self._get_axis_number(axis)
        assert axis_number in {0, 1}
        return self.index if axis_number == 0 else self.columns

    @final
    @classmethod
    def _get_block_manager_axis(cls, axis: Axis) -> int:
        """Map the axis to the block_manager axis."""
        axis = cls._get_axis_number(axis)
        ndim = cls._AXIS_LEN
        if ndim == 2:
            # i.e. DataFrame
            return 1 - axis
        return axis

    @final
    def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]:
        # index or columns
        axis_index = getattr(self, axis)
        d = {}
        prefix = axis[0]

        for i, name in enumerate(axis_index.names):
            if name is not None:
                key = level = name
            else:
                # prefix with 'i' or 'c' depending on the input axis
                # e.g., you must do ilevel_0 for the 0th level of an unnamed
                # MultiIndex
                key = f"{prefix}level_{i}"
                level = i

            level_values = axis_index.get_level_values(level)
            s = level_values.to_series()
            s.index = axis_index
            d[key] = s

        # put the index/columns itself in the dict
        if isinstance(axis_index, MultiIndex):
            dindex = axis_index
        else:
            dindex = axis_index.to_series()

        d[axis] = dindex
        return d

    @final
    def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]:
        from pandas.core.computation.parsing import clean_column_name

        d: dict[str, Series | MultiIndex] = {}
        for axis_name in self._AXIS_ORDERS:
            d.update(self._get_axis_resolvers(axis_name))

        return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}

    @final
    def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]:
        """
        Return the special character free column resolvers of a dataframe.

        Column names with special characters are 'cleaned up' so that they can
        be referred to by backtick quoting.
        Used in :meth:`DataFrame.eval`.
""" from pandas.core.computation.parsing import clean_column_name if isinstance(self, ABCSeries): return {clean_column_name(self.name): self} return { clean_column_name(k): v for k, v in self.items() if not isinstance(k, int) } @property def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) @property def _stat_axis(self) -> Index: return getattr(self, self._stat_axis_name) @property def shape(self) -> tuple[int, ...]: """ Return a tuple of axis dimensions """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) @property def axes(self) -> list[Index]: """ Return index label(s) of the internal NDFrame """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] @property def ndim(self) -> int: """ Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- ndarray.ndim : Number of array dimensions. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.ndim 1 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 """ return self._mgr.ndim @property def size(self) -> int: """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.size 3 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 """ return np.prod(self.shape) @overload def set_axis( self: NDFrameT, labels, axis: Axis = ..., inplace: Literal[False] = ... ) -> NDFrameT: ... @overload def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None: ... @overload def set_axis(self, labels, *, inplace: Literal[True]) -> None: ... @overload def set_axis( self: NDFrameT, labels, axis: Axis = ..., inplace: bool_t = ... ) -> NDFrameT | None: ... def set_axis(self, labels, axis: Axis = 0, inplace: bool_t = False): """ Assign desired index to given axis. Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : %(axes_single_arg)s, default 0 The axis to update. The value 0 identifies the rows%(axis_description_sub)s. inplace : bool, default False Whether to return a new %(klass)s instance. Returns ------- renamed : %(klass)s or None An object of type %(klass)s or None if ``inplace=True``. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ self._check_inplace_and_allows_duplicate_labels(inplace) return self._set_axis_nocheck(labels, axis, inplace) @final def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool_t): # NDFrame.rename with inplace=False calls set_axis(inplace=True) on a copy. if inplace: setattr(self, self._get_axis_name(axis), labels) else: obj = self.copy() obj.set_axis(labels, axis=axis, inplace=True) return obj def _set_axis(self, axis: int, labels: Index) -> None: labels = ensure_index(labels) self._mgr.set_axis(axis, labels) self._clear_item_cache() @final def swapaxes(self: NDFrameT, axis1, axis2, copy=True) -> NDFrameT: """ Interchange axes and swap values axes appropriately. 
Returns ------- y : same as input """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) if i == j: if copy: return self.copy() return self mapping = {i: j, j: i} new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)) new_values = self.values.swapaxes(i, j) if copy: new_values = new_values.copy() return self._constructor( new_values, *new_axes, ).__finalize__(self, method="swapaxes") @final @doc(klass=_shared_doc_kwargs["klass"]) def droplevel(self: NDFrameT, level, axis=0) -> NDFrameT: """ Return {klass} with requested index / column level(s) removed. Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Axis along which the level(s) is removed: * 0 or 'index': remove level(s) in column. * 1 or 'columns': remove level(s) in row. Returns ------- {klass} {klass} with requested index / column level(s) removed. Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) return self.set_axis(new_labels, axis=axis, inplace=False) def pop(self, item: Hashable) -> Series | Any: result = self[item] del self[item] return result @final def squeeze(self, axis=None): """ Squeeze 1 dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame. Examples -------- >>> primes = pd.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. 
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_0a = df.loc[df.index < 1, ['a']] >>> df_0a a 0 1 Squeezing the rows produces a single scalar Series: >>> df_0a.squeeze('rows') a 1 Name: 0, dtype: int64 Squeezing all axes will project directly into a scalar: >>> df_0a.squeeze() 1 """ axis = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) return self.iloc[ tuple( 0 if i in axis and len(a) == 1 else slice(None) for i, a in enumerate(self.axes) ) ] # ---------------------------------------------------------------------- # Rename def _rename( self: NDFrameT, mapper: Renamer | None = None, *, index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, copy: bool_t = True, inplace: bool_t = False, level: Level | None = None, errors: str = "ignore", ) -> NDFrameT | None: """ Alter axes input function or functions. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value (Series only). Parameters ---------- %(axes)s : scalar, list-like, dict-like or function, optional Scalar or list-like will alter the ``Series.name`` attribute, and raise on DataFrame. dict-like or functions are transformations to apply to that axis' values copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new {klass}. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- renamed : {klass} (new object) Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- NDFrame.rename_axis Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 Since ``DataFrame`` doesn't have a ``.name`` attribute, only mapping-type arguments are allowed. >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(2) Traceback (most recent call last): ... TypeError: 'int' object is not callable ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. 
>>> df.rename(index=str, columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}) a B 0 1 4 1 2 5 2 3 6 Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 See the :ref:`user guide <basics.rename>` for more. """ if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") if index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) elif mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) else: # use the mapper argument if axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=copy) for axis_no, replacements in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) f = com.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) # GH 13473 if not callable(replacements): if ax._is_multi and level is not None: indexer = ax.get_level_values(level).get_indexer_for(replacements) else: indexer = ax.get_indexer_for(replacements) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(replacements) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") new_index = ax._transform_index(f, level=level) result._set_axis_nocheck(new_index, axis=axis_no, inplace=True) result._clear_item_cache() if inplace: self._update_inplace(result) return None else: return result.__finalize__(self, method="rename") @rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)]) def rename_axis(self, mapper=lib.no_default, **kwargs): """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. Note that the ``columns`` parameter is not allowed if the object is a Series. This parameter only apply for DataFrame type objects. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. copy : bool, default True Also copy underlying data. inplace : bool, default False Modifies the object directly, instead of creating a new Series or DataFrame. Returns ------- Series, DataFrame, or None The same type as the caller or None if ``inplace=True``. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. In this case, the parameter ``copy`` is ignored. The second calling convention will modify the names of the corresponding index if mapper is a list or a scalar. 
However, if mapper is dict-like or a function, it will use the deprecated behavior of modifying the axis *labels*. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- **Series** >>> s = pd.Series(["dog", "cat", "monkey"]) >>> s 0 dog 1 cat 2 monkey dtype: object >>> s.rename_axis("animal") animal 0 dog 1 cat 2 monkey dtype: object **DataFrame** >>> df = pd.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal") >>> df num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns") >>> df limbs num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 **MultiIndex** >>> df.index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... names=['type', 'name']) >>> df limbs num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}) limbs num_legs num_arms class name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(columns=str.upper) LIMBS num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 """ axes, kwargs = self._construct_axes_from_arguments( (), kwargs, sentinel=lib.no_default ) copy = kwargs.pop("copy", True) inplace = kwargs.pop("inplace", False) axis = kwargs.pop("axis", 0) if axis is not None: axis = self._get_axis_number(axis) if kwargs: raise TypeError( "rename_axis() got an unexpected keyword " f'argument "{list(kwargs.keys())[0]}"' ) inplace = validate_bool_kwarg(inplace, "inplace") if mapper is not lib.no_default: # Use v0.23 behavior if a scalar or list non_mapper = is_scalar(mapper) or ( is_list_like(mapper) and not is_dict_like(mapper) ) if non_mapper: return self._set_axis_name(mapper, axis=axis, inplace=inplace) else: raise ValueError("Use `.rename` to alter labels with a mapper.") else: # Use new behavior. Means that index and/or columns # is specified result = self if inplace else self.copy(deep=copy) for axis in range(self._AXIS_LEN): v = axes.get(self._get_axis_name(axis)) if v is lib.no_default: continue non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) if non_mapper: newnames = v else: f = com.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True) if not inplace: return result @final def _set_axis_name(self, name, axis=0, inplace=False): """ Set the name(s) of the axis. Parameters ---------- name : str or list of str Name(s) to set. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to set the label. The value 0 or 'index' specifies index, and the value 1 or 'columns' specifies columns. inplace : bool, default False If `True`, do operation inplace and return None. Returns ------- Series, DataFrame, or None The same type as the caller or `None` if `inplace` is `True`. See Also -------- DataFrame.rename : Alter the axis labels of :class:`DataFrame`. Series.rename : Alter the index labels or set the index name of :class:`Series`. Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`. Examples -------- >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs dog 4 cat 4 monkey 2 >>> df._set_axis_name("animal") num_legs animal dog 4 cat 4 monkey 2 >>> df.index = pd.MultiIndex.from_product( ... 
[["mammal"], ['dog', 'cat', 'monkey']]) >>> df._set_axis_name(["type", "name"]) num_legs type name mammal dog 4 cat 4 monkey 2 """ axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) inplace = validate_bool_kwarg(inplace, "inplace") renamed = self if inplace else self.copy() renamed.set_axis(idx, axis=axis, inplace=True) if not inplace: return renamed # ---------------------------------------------------------------------- # Comparison Methods @final def _indexed_same(self, other) -> bool_t: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) @final def equals(self, other: object) -> bool_t: """ Test whether two objects contain the same elements. This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in the same location are considered equal. The row/column index do not need to have the same type, as long as the values are considered equal. Corresponding columns must be of the same dtype. Parameters ---------- other : Series or DataFrame The other Series or DataFrame to be compared with the first. Returns ------- bool True if all elements are the same in both objects, False otherwise. See Also -------- Series.eq : Compare two Series objects of the same length and return a Series where each element is True if the element in each Series is equal, False otherwise. DataFrame.eq : Compare two DataFrame objects of the same shape and return a DataFrame where each element is True if the respective element in each DataFrame is equal, False otherwise. testing.assert_series_equal : Raises an AssertionError if left and right are not equal. Provides an easy interface to ignore inequality in dtypes, indexes and precision among others. testing.assert_frame_equal : Like assert_series_equal, but targets DataFrames. numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) >>> df 1 2 0 10 20 DataFrames df and exactly_equal have the same types and values for their elements and column labels, which will return True. >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) >>> exactly_equal 1 2 0 10 20 >>> df.equals(exactly_equal) True DataFrames df and different_column_type have the same element types and values, but have different types for the column labels, which will still return True. >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) >>> different_column_type 1.0 2.0 0 10 20 >>> df.equals(different_column_type) True DataFrames df and different_data_type have different types for the same values for their elements, and will return False even though their column labels are the same values and types. 
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) >>> different_data_type 1 2 0 10.0 20.0 >>> df.equals(different_data_type) False """ if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- # Unary Methods @final def __neg__(self): def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return operator.inv(values) else: return operator.neg(values) new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__neg__") @final def __pos__(self): def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return values.copy() else: return operator.pos(values) new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__pos__") @final def __invert__(self): if not self.size: # inv fails with 0 len return self new_data = self._mgr.apply(operator.invert) return self._constructor(new_data).__finalize__(self, method="__invert__") @final def __nonzero__(self): raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ @final def bool(self): """ Return the bool of a single element Series or DataFrame. This must be a boolean scalar value, either True or False. It will raise a ValueError if the Series or DataFrame does not have exactly 1 element, or that element is not boolean (integer values 0 and 1 will also raise an exception). Returns ------- bool The value in the Series or DataFrame. See Also -------- Series.astype : Change the data type of a Series, including to boolean. DataFrame.astype : Change the data type of a DataFrame, including to boolean. numpy.bool_ : NumPy boolean data type, used by pandas for boolean values. Examples -------- The method will only work for single element objects with a boolean value: >>> pd.Series([True]).bool() True >>> pd.Series([False]).bool() False >>> pd.DataFrame({'col': [True]}).bool() True >>> pd.DataFrame({'col': [False]}).bool() False """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) elif is_scalar(v): raise ValueError( "bool cannot act on a non-boolean single element " f"{type(self).__name__}" ) self.__nonzero__() @final def abs(self: NDFrameT) -> NDFrameT: """ Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... 
}) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50 """ res_mgr = self._mgr.apply(np.abs) return self._constructor(res_mgr).__finalize__(self, name="abs") @final def __abs__(self: NDFrameT) -> NDFrameT: return self.abs() @final def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT: return self.round(decimals).__finalize__(self, method="__round__") # ------------------------------------------------------------------------- # Label or Level Combination Helpers # # A collection of helper methods for DataFrame/Series operations that # accept a combination of column/index labels and levels. All such # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. @final def _is_level_reference(self, key, axis=0): """ Test whether a key is a level reference for a given axis. To be considered a level reference, `key` must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. Parameters ---------- key : str Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool """ axis = self._get_axis_number(axis) return ( key is not None and is_hashable(key) and key in self.axes[axis].names and not self._is_label_reference(key, axis=axis) ) @final def _is_label_reference(self, key, axis=0) -> bool_t: """ Test whether a key is a label reference for a given axis. To be considered a label reference, `key` must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key : str Potential label name axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool """ axis = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis) return ( key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes) ) @final def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, `key` must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : str Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool """ return self._is_level_reference(key, axis=axis) or self._is_label_reference( key, axis=axis ) @final def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None: """ Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key : str or object Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). 
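As an illustration (a hypothetical frame, not taken from the upstream docs), a name shared by an index level and a column is ambiguous for ``axis=0``: >>> df = pd.DataFrame({"a": [1, 2]}, index=pd.Index([3, 4], name="a")) >>> df._check_label_or_level_ambiguity("a", axis=0) Traceback (most recent call last): ... ValueError: 'a' is both an index level and a column label, which is ambiguous.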
Raises ------ ValueError: `key` is ambiguous """ axis = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis) if ( key is not None and is_hashable(key) and key in self.axes[axis].names and any(key in self.axes[ax] for ax in other_axes) ): # Build an informative and grammatical warning level_article, level_type = ( ("an", "index") if axis == 0 else ("a", "column") ) label_article, label_type = ( ("a", "column") if axis == 0 else ("an", "index") ) msg = ( f"'{key}' is both {level_article} {level_type} level and " f"{label_article} {label_type} label, which is ambiguous." ) raise ValueError(msg) @final def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray: """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key : str Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- values : np.ndarray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels FutureWarning if `key` is ambiguous. This will become an ambiguity error in a future version """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex): multi_message = ( "\n" "For a multi-index, the label must be a " "tuple with elements corresponding to each level." ) else: multi_message = "" label_axis_name = "column" if axis == 0 else "index" raise ValueError( f"The {label_axis_name} label '{key}' is not unique.{multi_message}" ) return values @final def _drop_labels_or_levels(self, keys, axis: int = 0): """ Drop labels and/or levels for the given `axis`. For each key in `keys`: - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. Parameters ---------- keys : str or list of str labels or levels to drop axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any `keys` match neither a label nor a level """ axis = self._get_axis_number(axis) # Validate keys keys = com.maybe_make_list(keys) invalid_keys = [ k for k in keys if not self._is_label_or_level_reference(k, axis=axis) ] if invalid_keys: raise ValueError( "The following keys are not valid labels or " f"levels for axis {axis}: {invalid_keys}" ) # Compute levels and labels to drop levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] # Perform copy upfront and then use inplace operations below. # This ensures that we always perform exactly one copy. 
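# (The inplace drop/reset calls below mutate only this private copy, so the caller's frame is never left partially modified if one of the drops raises.)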
# ``copy`` and/or ``inplace`` options could be added in the future. dropped = self.copy() if axis == 0: # Handle dropping index levels if levels_to_drop: dropped.reset_index(levels_to_drop, drop=True, inplace=True) # Handle dropping columns labels if labels_to_drop: dropped.drop(labels_to_drop, axis=1, inplace=True) else: # Handle dropping column levels if levels_to_drop: if isinstance(dropped.columns, MultiIndex): # Drop the specified levels from the MultiIndex dropped.columns = dropped.columns.droplevel(levels_to_drop) else: # Drop the last level of Index by replacing with # a RangeIndex dropped.columns = RangeIndex(dropped.columns.size) # Handle dropping index labels if labels_to_drop: dropped.drop(labels_to_drop, axis=0, inplace=True) return dropped # ---------------------------------------------------------------------- # Iteration # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: None # type: ignore[assignment] def __iter__(self): """ Iterate over info axis. Returns ------- iterator Info axis as iterator. """ return iter(self._info_axis) # can we get a better explanation of this? def keys(self): """ Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. Returns ------- Index Info axis. """ return self._info_axis def items(self): """ Iterate over (label, values) on info axis This is index for Series and columns for DataFrame. Returns ------- Generator """ for h in self._info_axis: yield h, self[h] def __len__(self) -> int: """Returns length of info axis""" return len(self._info_axis) @final def __contains__(self, key) -> bool_t: """True if the key is in the info axis""" return key in self._info_axis @property def empty(self) -> bool_t: """ Indicator whether Series/DataFrame is empty. True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. Returns ------- bool If Series/DataFrame is empty, return True, if not return False. See Also -------- Series.dropna : Return series without null values. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. Notes ----- If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True >>> ser_empty = pd.Series({'A' : []}) >>> ser_empty A [] dtype: object >>> ser_empty.empty False >>> ser_empty = pd.Series() >>> ser_empty.empty True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) # ---------------------------------------------------------------------- # Array Interface # This is also set in IndexOpsMixin # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented __array_priority__ = 1000 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: return np.asarray(self._values, dtype=dtype) def __array_wrap__( self, result: np.ndarray, context: tuple[Callable, tuple[Any, ...], int] | None = None, ): """ Gets called after a ufunc and other functions. 
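NumPy uses this hook to re-box the raw ndarray that a non-ufunc function produces from ``__array__`` back into a pandas object (or a scalar, via the ``item_from_zerodim`` path below). A hedged illustration, not from the upstream docs (the values are hypothetical; ``np.ptp`` is the non-ufunc reduction the body's own comment mentions): >>> s = pd.Series([1, 5, 3]) >>> np.ptp(s) # reduction result is unwrapped to a scalar # doctest: +SKIP 4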
Parameters ---------- result: np.ndarray The result of the ufunc or other function called on the NumPy array returned by __array__ context: tuple of (func, tuple, int) This parameter is returned by ufuncs as a 3-element tuple: (name of the ufunc, arguments of the ufunc, domain of the ufunc), but is not set by other numpy functions. Notes ----- Series implements __array_ufunc__, so this is not called for ufuncs on Series. """ # Note: at time of dask 2022.01.0, this is still used by dask warnings.warn( "The __array_wrap__ method of DataFrame and Series will be removed in " "a future version", DeprecationWarning, stacklevel=2, ) res = lib.item_from_zerodim(result) if is_scalar(res): # e.g. we get here with np.ptp(series) # ptp also requires the item_from_zerodim return res d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) return self._constructor(res, **d).__finalize__(self, method="__array_wrap__") @final def __array_ufunc__( self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any ): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) # ---------------------------------------------------------------------- # Picklability @final def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return { "_mgr": self._mgr, "_typ": self._typ, "_metadata": self._metadata, "attrs": self.attrs, "_flags": {k: self.flags[k] for k in self.flags._keys}, **meta, } @final def __setstate__(self, state): if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): if "_data" in state and "_mgr" not in state: # compat for older pickles state["_mgr"] = state.pop("_data") typ = state.get("_typ") if typ is not None: attrs = state.get("_attrs", {}) object.__setattr__(self, "_attrs", attrs) flags = state.get("_flags", {"allows_duplicate_labels": True}) object.__setattr__(self, "_flags", Flags(self, **flags)) # set in the order of internal names # to avoid definitional recursion # e.g. say fill_value needing _mgr to be # defined meta = set(self._internal_names + self._metadata) for k in list(meta): if k in state and k != "_flags": v = state[k] object.__setattr__(self, k, v) for k, v in state.items(): if k not in meta: object.__setattr__(self, k, v) else: raise NotImplementedError("Pre-0.12 pickles are no longer supported") elif len(state) == 2: raise NotImplementedError("Pre-0.12 pickles are no longer supported") self._item_cache = {} # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: # string representation based upon iterating over self # (since, by definition, `PandasContainers` are iterable) prepr = f"[{','.join(map(pprint_thing, self))}]" return f"{type(self).__name__}({prepr})" @final def _repr_latex_(self): """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). """ if config.get_option("display.latex.repr"): return self.to_latex() else: return None @final def _repr_data_resource_(self): """ Not a real Jupyter special repr method, but we use the same naming convention.
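When the ``display.html.table_schema`` option is enabled, the notebook machinery uses this to publish the frame's head as a Table Schema (``orient="table"``) payload parsed back into a dict. A minimal sketch of enabling it (``df`` is any hypothetical DataFrame): >>> pd.set_option("display.html.table_schema", True) # doctest: +SKIP >>> df._repr_data_resource_() # dict parsed from to_json(orient="table") # doctest: +SKIP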
""" if config.get_option("display.html.table_schema"): data = self.head(config.get_option("display.max_rows")) as_json = data.to_json(orient="table") as_json = cast(str, as_json) return json.loads(as_json, object_pairs_hook=collections.OrderedDict) # ---------------------------------------------------------------------- # I/O Methods @final @doc(klass="object", storage_options=_shared_docs["storage_options"]) def to_excel( self, excel_writer, sheet_name: str = "Sheet1", na_rep: str = "", float_format: str | None = None, columns=None, header=True, index=True, index_label=None, startrow=0, startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep="inf", verbose=True, freeze_panes=None, storage_options: StorageOptions = None, ) -> None: """ Write {klass} to an Excel sheet. To write a single {klass} to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an `ExcelWriter` object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique `sheet_name`. With all data written to the file it is necessary to save the changes. Note that creating an `ExcelWriter` object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example ``float_format="%.2f"`` will format 0.1234 to 0.12. columns : sequence or list of str, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of string is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, optional Column label for index column(s) if desired. If not specified, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. startrow : int, default 0 Upper left cell row to dump data frame. startcol : int, default 0 Upper left cell column to dump data frame. engine : str, optional Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. .. deprecated:: 1.2.0 As the `xlwt <https://pypi.org/project/xlwt/>`__ package is no longer maintained, the ``xlwt`` engine will be removed in a future version of pandas. merge_cells : bool, default True Write MultiIndex and Hierarchical Rows as merged cells. encoding : str, optional Encoding of the resulting excel file. Only necessary for xlwt, other writers support unicode natively. inf_rep : str, default 'inf' Representation for infinity (there is no native representation for infinity in Excel). verbose : bool, default True Display more information in the error logs. freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. {storage_options} .. versionadded:: 1.2.0 See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. ExcelWriter : Class for writing DataFrame objects into excel sheets. read_excel : Read an Excel file into a pandas DataFrame. read_csv : Read a comma-separated values (csv) file into DataFrame. 
Notes ----- For compatibility with :meth:`~DataFrame.to_csv`, to_excel serializes lists and dicts to strings before writing. Once a workbook has been saved it is not possible to write further data without rewriting the whole workbook. Examples -------- Create, write to and save a workbook: >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df1.to_excel("output.xlsx") # doctest: +SKIP To specify the sheet name: >>> df1.to_excel("output.xlsx", ... sheet_name='Sheet_name_1') # doctest: +SKIP If you wish to write to more than one sheet in the workbook, it is necessary to specify an ExcelWriter object: >>> df2 = df1.copy() >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') ExcelWriter can also be used to append to an existing Excel file: >>> with pd.ExcelWriter('output.xlsx', ... mode='a') as writer: # doctest: +SKIP ... df.to_excel(writer, sheet_name='Sheet_name_3') To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter( df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep, ) formatter.write( excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, ) @final @doc( storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_json( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, orient: str | None = None, date_format: str | None = None, double_precision: int = 10, force_ascii: bool_t = True, date_unit: str = "ms", default_handler: Callable[[Any], JSONSerializable] | None = None, lines: bool_t = False, compression: CompressionOptions = "infer", index: bool_t = True, indent: int | None = None, storage_options: StorageOptions = None, ) -> str | None: """ Convert the object to a JSON string. Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. orient : str Indication of expected JSON string format. * Series: - default is 'index' - allowed values are: {{'split', 'records', 'index', 'table'}}. * DataFrame: - default is 'columns' - allowed values are: {{'split', 'records', 'index', 'columns', 'values', 'table'}}. * The format of the JSON string: - 'split' : dict like {{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}} - 'records' : list like [{{column -> value}}, ... , {{column -> value}}] - 'index' : dict like {{index -> {{column -> value}}}} - 'columns' : dict like {{column -> {{index -> value}}}} - 'values' : just the values array - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}} Describing the data, where data component is like ``orient='records'``. 
date_format : {{None, 'epoch', 'iso'}} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, the default is 'epoch'. double_precision : int, default 10 The number of decimal places to use when encoding floating point values. force_ascii : bool, default True Force encoded string to be ASCII. date_unit : str, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, microsecond, and nanosecond respectively. default_handler : callable, default None Handler to call if object cannot otherwise be converted to a suitable format for JSON. Should receive a single argument which is the object to convert and return a serialisable object. lines : bool, default False If 'orient' is 'records' write out line-delimited json format. Will throw ValueError if incorrect 'orient' since others are not list-like. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. index : bool, default True Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. indent : int, optional Length of whitespace used to indent each record. .. versionadded:: 1.0.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting json format as a string. Otherwise returns None. See Also -------- read_json : Convert a JSON string to pandas object. Notes ----- The behavior of ``indent=0`` varies from the stdlib, which does not indent the output but does insert newlines. Currently, ``indent=0`` and the default ``indent=None`` are equivalent in pandas, though this may change in a future release. ``orient='table'`` contains a 'pandas_version' field under 'schema'. This stores the version of `pandas` used in the latest revision of the schema. Examples -------- >>> import json >>> df = pd.DataFrame( ... [["a", "b"], ["c", "d"]], ... index=["row 1", "row 2"], ... columns=["col 1", "col 2"], ... ) >>> result = df.to_json(orient="split") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP {{ "columns": [ "col 1", "col 2" ], "index": [ "row 1", "row 2" ], "data": [ [ "a", "b" ], [ "c", "d" ] ] }} Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. 
>>> result = df.to_json(orient="records") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP [ {{ "col 1": "a", "col 2": "b" }}, {{ "col 1": "c", "col 2": "d" }} ] Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> result = df.to_json(orient="index") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP {{ "row 1": {{ "col 1": "a", "col 2": "b" }}, "row 2": {{ "col 1": "c", "col 2": "d" }} }} Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: >>> result = df.to_json(orient="columns") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP {{ "col 1": {{ "row 1": "a", "row 2": "c" }}, "col 2": {{ "row 1": "b", "row 2": "d" }} }} Encoding/decoding a Dataframe using ``'values'`` formatted JSON: >>> result = df.to_json(orient="values") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP [ [ "a", "b" ], [ "c", "d" ] ] Encoding with Table Schema: >>> result = df.to_json(orient="table") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP {{ "schema": {{ "fields": [ {{ "name": "index", "type": "string" }}, {{ "name": "col 1", "type": "string" }}, {{ "name": "col 2", "type": "string" }} ], "primaryKey": [ "index" ], "pandas_version": "1.4.0" }}, "data": [ {{ "index": "row 1", "col 1": "a", "col 2": "b" }}, {{ "index": "row 2", "col 1": "c", "col 2": "d" }} ] }} """ from pandas.io import json if date_format is None and orient == "table": date_format = "iso" elif date_format is None: date_format = "epoch" config.is_nonnegative_int(indent) indent = indent or 0 return json.to_json( path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, lines=lines, compression=compression, index=index, indent=indent, storage_options=storage_options, ) @final def to_hdf( self, path_or_buf, key: str, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool_t = False, format: str | None = None, index: bool_t = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool_t | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None: """ Write the contained data to an HDF5 file using HDFStore. Hierarchical Data Format (HDF) is self-describing, allowing an application to interpret the structure and contents of a file with no outside information. One HDF file can hold a mix of related objects which can be accessed as a group or as individual objects. In order to add another DataFrame or Series to an existing HDF file, please use append mode and a different key. .. warning:: One can store a subclass of ``DataFrame`` or ``Series`` to HDF5, but the type of the subclass is lost upon storing. For more information see the :ref:`user guide <io.hdf5>`. Parameters ---------- path_or_buf : str or pandas.HDFStore File path or HDFStore object. key : str Identifier for the group in the store. mode : {'a', 'w', 'r+'}, default 'a' Mode to open file: - 'w': write, a new file is created (an existing file with the same name would be deleted). - 'a': append, an existing file is opened for reading and writing, and if the file does not exist it is created. - 'r+': similar to 'a', but the file must already exist. complevel : {0-9}, default None Specifies a compression level for data.
A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available issues a ValueError. append : bool, default False For Table formats, append the input data to the existing. format : {'fixed', 'table', None}, default 'fixed' Possible values: - 'fixed': Fixed format. Fast writing/reading. Not-appendable, nor searchable. - 'table': Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. - If None, pd.get_option('io.hdf.default_format') is checked, followed by fallback to "fixed". errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. encoding : str, default "UTF-8" min_itemsize : dict or int, optional Map column names to minimum string sizes for columns. nan_rep : Any, optional How to represent null values as str. Not allowed with append=True. data_columns : list of columns or True, optional List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See :ref:`io.hdf5-query-data-columns`. Applicable only to format='table'. See Also -------- read_hdf : Read from HDF file. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. DataFrame.to_sql : Write to a SQL table. DataFrame.to_feather : Write out feather-format for DataFrames. DataFrame.to_csv : Write out to a csv file. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, ... index=['a', 'b', 'c']) # doctest: +SKIP >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP We can add another object to the same file: >>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP Reading from HDF file: >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP A B a 1 4 b 2 5 c 3 6 >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP 0 1 1 2 2 3 3 4 dtype: int64 """ from pandas.io import pytables # Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected # "Union[DataFrame, Series]" [arg-type] pytables.to_hdf( path_or_buf, key, self, # type: ignore[arg-type] mode=mode, complevel=complevel, complib=complib, append=append, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding, ) @final def to_sql( self, name: str, con, schema=None, if_exists: str = "fail", index: bool_t = True, index_label=None, chunksize=None, dtype: DtypeArg | None = None, method=None, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Databases supported by SQLAlchemy [1]_ are supported. Tables can be newly created, appended to, or overwritten. Parameters ---------- name : str Name of SQL table. con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that library. Legacy support is provided for sqlite3.Connection objects. 
The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_. schema : str, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' How to behave if the table already exists. * fail: Raise a ValueError. * replace: Drop the table before inserting new values. * append: Insert new values to the existing table. index : bool, default True Write DataFrame index as a column. Uses `index_label` as the column name in the table. index_label : str or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specify the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. Returns ------- None or int Number of rows affected by to_sql. None is returned if the callable passed into ``method`` does not return the number of rows. The number of rows reported is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` or the SQLAlchemy connectable, which may not reflect the exact number of written rows as stipulated in the `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or `SQLAlchemy <https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.BaseCursorResult.rowcount>`__ documentation. .. versionadded:: 1.4.0 Raises ------ ValueError When the table already exists and `if_exists` is 'fail' (the default). See Also -------- read_sql : Read a DataFrame from a table. Notes ----- Timezone aware datetime columns will be written as ``Timestamp with timezone`` type with SQLAlchemy if supported by the database. Otherwise, the datetimes will be stored as timezone unaware timestamps local to the original timezone. References ---------- .. [1] https://docs.sqlalchemy.org .. [2] https://www.python.org/dev/peps/pep-0249/ Examples -------- Create an in-memory SQLite database. >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite://', echo=False) Create a table from scratch with 3 rows. >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) >>> df name 0 User 1 1 User 2 2 User 3 >>> df.to_sql('users', con=engine) 3 >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] An `sqlalchemy.engine.Connection` can also be passed to `con`: >>> with engine.begin() as connection: ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) ... df1.to_sql('users', con=connection, if_exists='append') 2 This is allowed to support operations that require that the same DBAPI connection is used for the entire operation.
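Batching inserts can reduce round trips; as a rough sketch, not from the upstream docs (``staging`` is a hypothetical table name, separate from this running example), ``method='multi'`` combined with ``chunksize`` issues one multi-row ``INSERT`` per batch: >>> df1.to_sql('staging', con=engine, if_exists='replace', ... method='multi', chunksize=1000) # doctest: +SKIP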
>>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']}) >>> df2.to_sql('users', con=engine, if_exists='append') 2 >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), (0, 'User 4'), (1, 'User 5'), (0, 'User 6'), (1, 'User 7')] Overwrite the table with just ``df2``. >>> df2.to_sql('users', con=engine, if_exists='replace', ... index_label='id') 2 >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 6'), (1, 'User 7')] Specify the dtype (especially useful for integers with missing values). Notice that while pandas is forced to store the data as floating point, the database supports nullable integers. When fetching the data with Python, we get back integer scalars. >>> df = pd.DataFrame({"A": [1, None, 2]}) >>> df A 0 1.0 1 NaN 2 2.0 >>> from sqlalchemy.types import Integer >>> df.to_sql('integers', con=engine, index=False, ... dtype={"A": Integer()}) 3 >>> engine.execute("SELECT * FROM integers").fetchall() [(1,), (None,), (2,)] """ # noqa:E501 from pandas.io import sql return sql.to_sql( self, name, con, schema=schema, if_exists=if_exists, index=index, index_label=index_label, chunksize=chunksize, dtype=dtype, method=method, ) @final @doc( storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_pickle( self, path: FilePath | WriteBuffer[bytes], compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, ) -> None: """ Pickle (serialize) object to file. Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. File path where the pickled object will be stored. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values are 0, 1, 2, 3, 4, 5. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html. {storage_options} .. versionadded:: 1.2.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ # noqa: E501 from pandas.io.pickle import to_pickle to_pickle( self, path, compression=compression, protocol=protocol, storage_options=storage_options, ) @final def to_clipboard( self, excel: bool_t = True, sep: str | None = None, **kwargs ) -> None: r""" Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool, default True Produce output in a csv format for easy pasting into excel. - True, use the provided separator for csv pasting. - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. 
**kwargs These parameters will be passed to DataFrame.to_csv. See Also -------- DataFrame.to_csv : Write a DataFrame to a comma-separated values (csv) file. read_clipboard : Read text from clipboard and pass to read_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `PyQt4` modules) - Windows : none - macOS : none This method uses the processes developed for the package `pyperclip`. A solution to render any output string format is given in the examples. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 Using the original `pyperclip` package for any string output format. .. code-block:: python import pyperclip html = df.style.to_html() pyperclip.copy(html) """ from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) @final def to_xarray(self): """ Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <https://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', ... 'falcon', 'parrot'], ... 
'speed': [350, 18, 361, 15]}) >>> df_multiindex = df_multiindex.set_index(['date', 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (animal: 2, date: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ xarray = import_optional_dependency("xarray") if self.ndim == 1: return xarray.DataArray.from_series(self) else: return xarray.Dataset.from_dataframe(self) @final @doc(returns=fmt.return_docstring) def to_latex( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal=".", multicolumn=None, multicolumn_format=None, multirow=None, caption=None, label=None, position=None, ): r""" Render object to a LaTeX tabular, longtable, or nested table. Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted into a main LaTeX document or read from an external file with ``\input{{table.tex}}``. .. versionchanged:: 1.0.0 Added caption and label arguments. .. versionchanged:: 1.2.0 Added position argument, changed meaning of caption argument. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {{str: function}}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function or str, optional, default None Formatter for floating point numbers. For example ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will both result in 0.1234 being formatted as 0.12. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a \usepackage{{longtable}} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special characters in column names. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. decimal : str, default '.' 
Character recognized as decimal separator, e.g. ',' in Europe. multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module. multicolumn_format : str, default 'l' The alignment for multicolumns, similar to `column_format` The default will be read from the config module. multirow : bool, default False Use \multirow to enhance MultiIndex rows. Requires adding a \usepackage{{multirow}} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. caption : str or tuple, optional Tuple (full_caption, short_caption), which results in ``\caption[short_caption]{{full_caption}}``; if a single string is passed, no short caption will be set. .. versionadded:: 1.0.0 .. versionchanged:: 1.2.0 Optionally allow caption to be a tuple ``(full_caption, short_caption)``. label : str, optional The LaTeX label to be placed inside ``\label{{}}`` in the output. This is used with ``\ref{{}}`` in the main ``.tex`` file. .. versionadded:: 1.0.0 position : str, optional The LaTeX positional argument for tables, to be placed after ``\begin{{}}`` in the output. .. versionadded:: 1.2.0 {returns} See Also -------- Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Examples -------- >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], ... mask=['red', 'purple'], ... weapon=['sai', 'bo staff'])) >>> print(df.to_latex(index=False)) # doctest: +SKIP \begin{{tabular}}{{lll}} \toprule name & mask & weapon \\ \midrule Raphael & red & sai \\ Donatello & purple & bo staff \\ \bottomrule \end{{tabular}} """ msg = ( "In future versions `DataFrame.to_latex` is expected to utilise the base " "implementation of `Styler.to_latex` for formatting and rendering. " "The arguments signature may therefore change. It is recommended instead " "to use `DataFrame.style.to_latex` which also contains additional " "functionality." 
) warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) # Get defaults from the pandas config if self.ndim == 1: self = self.to_frame() if longtable is None: longtable = config.get_option("display.latex.longtable") if escape is None: escape = config.get_option("display.latex.escape") if multicolumn is None: multicolumn = config.get_option("display.latex.multicolumn") if multicolumn_format is None: multicolumn_format = config.get_option("display.latex.multicolumn_format") if multirow is None: multirow = config.get_option("display.latex.multirow") self = cast("DataFrame", self) formatter = DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, index_names=index_names, escape=escape, decimal=decimal, ) return DataFrameRenderer(formatter).to_latex( buf=buf, column_format=column_format, longtable=longtable, encoding=encoding, multicolumn=multicolumn, multicolumn_format=multicolumn_format, multirow=multirow, caption=caption, label=label, position=position, ) @final @doc( storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"], ) @deprecate_kwarg(old_arg_name="line_terminator", new_arg_name="lineterminator") def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, sep: str = ",", na_rep: str = "", float_format: str | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | list[str] = True, index: bool_t = True, index_label: IndexLabel | None = None, mode: str = "w", encoding: str | None = None, compression: CompressionOptions = "infer", quoting: int | None = None, quotechar: str = '"', lineterminator: str | None = None, chunksize: int | None = None, date_format: str | None = None, doublequote: bool_t = True, escapechar: str | None = None, decimal: str = ".", errors: str = "strict", storage_options: StorageOptions = None, ) -> str | None: r""" Write object to a comma-separated values (csv) file. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. If a non-binary file object is passed, it should be opened with `newline=''`, disabling universal newlines. If a binary file object is passed, `mode` might need to contain a `'b'`. .. versionchanged:: 1.2.0 Support for binary file objects was introduced. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, default None Format string for floating point numbers. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str Python write mode, default 'w'. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. 
`encoding` is not supported if `path_or_buf` is a non-binary file object. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other entries as additional compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'. .. versionchanged:: 1.2.0 Compression is supported for binary file objects. .. versionchanged:: 1.2.0 Previous versions forwarded dict entries for 'gzip' to `gzip.open` instead of `gzip.GzipFile` which prevented setting `mtime`. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. lineterminator : str, optional The newline character or character sequence to use in the output file. Defaults to `os.linesep`, which depends on the OS in which this method is called (e.g. '\\n' for Linux, '\\r\\n' for Windows). .. versionchanged:: 1.5.0 Previously was line_terminator, changed for consistency with read_csv and the standard library 'csv' module. chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv': >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ...
compression=compression_opts) # doctest: +SKIP To write a csv file to a new folder or nested folder you will first need to create it using either pathlib or os: >>> from pathlib import Path # doctest: +SKIP >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP >>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP >>> df.to_csv(filepath) # doctest: +SKIP >>> import os # doctest: +SKIP >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() formatter = DataFrameFormatter( frame=df, header=header, index=index, na_rep=na_rep, float_format=float_format, decimal=decimal, ) return DataFrameRenderer(formatter).to_csv( path_or_buf, lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, columns=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options, ) # ---------------------------------------------------------------------- # Lookup Caching def _reset_cacher(self) -> None: """ Reset the cacher. """ raise AbstractMethodError(self) def _maybe_update_cacher( self, clear: bool_t = False, verify_is_copy: bool_t = True, inplace: bool_t = False, ) -> None: """ See if we need to update our parent cacher; if ``clear``, then clear our cache. Parameters ---------- clear : bool, default False Clear the item cache. verify_is_copy : bool, default True Provide is_copy checks. """ if verify_is_copy: self._check_setitem_copy(t="referent") if clear: self._clear_item_cache() def _clear_item_cache(self) -> None: raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Indexing Methods def take( self: NDFrameT, indices, axis=0, is_copy: bool_t | None = None, **kwargs ) -> NDFrameT: """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. is_copy : bool Before pandas 1.0, ``is_copy=False`` can be specified to ensure that the return value is an actual copy. Starting with pandas 1.0, ``take`` always returns a copy, and the keyword is therefore deprecated. .. deprecated:: 1.0.0 **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ if is_copy is not None: warnings.warn( "is_copy is deprecated and will be removed in a future version. " "'take' always returns a copy, so there is no need to specify this.", FutureWarning, stacklevel=find_stack_level(), ) nv.validate_take((), kwargs) return self._take(indices, axis) def _take( self: NDFrameT, indices, axis=0, convert_indices: bool_t = True, ) -> NDFrameT: """ Internal version of the `take` allowing specification of additional args. See the docstring of `take` for full explanation of the parameters. """ self._consolidate_inplace() new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True, convert_indices=convert_indices, ) return self._constructor(new_data).__finalize__(self, method="take") def _take_with_is_copy(self: NDFrameT, indices, axis=0) -> NDFrameT: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (used in indexing for the SettingWithCopyWarning). See the docstring of `take` for full explanation of the parameters. """ result = self._take(indices=indices, axis=axis) # Maybe set copy if we didn't actually change the index. if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) return result @final def xs(self, key, axis=0, level=None, drop_level: bool_t = True): """ Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` cannot be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ...
'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog')) num_legs num_wings locomotion walks 4 0 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... level=[0, 'locomotion']) num_legs num_wings animal penguin 2 2 Get values at specified column and axis >>> df.xs('num_wings', axis=1) class animal locomotion mammal cat walks 0 dog walks 0 bat flies 2 bird penguin walks 2 Name: num_wings, dtype: int64 """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) if isinstance(key, list): warnings.warn( "Passing lists as key for xs is deprecated and will be removed in a " "future version. Pass key as a tuple instead.", FutureWarning, stacklevel=find_stack_level(), ) if level is not None: if not isinstance(labels, MultiIndex): raise TypeError("Index must be a MultiIndex") loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level) # create the tuple of the indexer _indexer = [slice(None)] * self.ndim _indexer[axis] = loc indexer = tuple(_indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if axis == 1: if drop_level: return self[key] index = self.columns else: index = self.index self._consolidate_inplace() if isinstance(index, MultiIndex): loc, new_index = index._get_loc_level(key, level=0) if not drop_level: if lib.is_integer(loc): new_index = index[loc : loc + 1] else: new_index = index[loc] else: loc = index.get_loc(key) if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: (inds,) = loc.nonzero() return self._take_with_is_copy(inds, axis=axis) else: return self._take_with_is_copy(loc, axis=axis) if not is_scalar(loc): new_index = index[loc] if is_scalar(loc) and axis == 0: # In this case loc should be an integer if self.ndim == 1: # if we encounter an array-like and we only have 1 dim # that means that there are list/ndarrays inside the Series! # so just return them (GH 6394) return self._values[loc] new_values = self._mgr.fast_xs(loc) result = self._constructor_sliced( new_values, index=self.columns, name=self.index[loc], dtype=new_values.dtype, ) elif is_scalar(loc): result = self.iloc[:, slice(loc, loc + 1)] elif axis == 1: result = self.iloc[:, loc] else: result = self.iloc[loc] result.index = new_index # this could be a view # but only in a single-dtyped view sliceable case result._set_is_copy(self, copy=not result._is_view) return result def __getitem__(self, item): raise AbstractMethodError(self) def _slice(self: NDFrameT, slobj: slice, axis=0) -> NDFrameT: """ Construct a slice of this container. Slicing with this method is *always* positional.
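For example, on a DataFrame ``df``, ``df._slice(slice(0, 2))`` selects the first two rows by position regardless of the index labels, analogous to ``df.iloc[0:2]``.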
""" assert isinstance(slobj, slice), type(slobj) axis = self._get_block_manager_axis(axis) result = self._constructor(self._mgr.get_slice(slobj, axis=axis)) result = result.__finalize__(self) # this could be a view # but only in a single-dtyped view sliceable case is_copy = axis != 0 or result._is_view result._set_is_copy(self, copy=is_copy) return result @final def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None: if not copy: self._is_copy = None else: assert ref is not None self._is_copy = weakref.ref(ref) def _check_is_chained_assignment_possible(self) -> bool_t: """ Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just near setting a value Will return a boolean if it we are a view and are cached, but a single-dtype meaning that the cacher should be updated following setting. """ if self._is_copy: self._check_setitem_copy(t="referent") return False @final def _check_setitem_copy(self, t="setting", force=False): """ Parameters ---------- t : str, the type of setting error force : bool, default False If True, then force showing an error. validate if we are doing a setitem on a chained copy. It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while other are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are view # (which is not generally guaranteed but is usually True. However, # this is in general not a good practice and we recommend using .loc. df.iloc[0:5]['group'] = 'a' """ # return early if the check is not needed if not (force or self._is_copy): return value = config.get_option("mode.chained_assignment") if value is None: return # see if the copy is not actually referred; if so, then dissolve # the copy weakref if self._is_copy is not None and not isinstance(self._is_copy, str): r = self._is_copy() if not gc.get_referents(r) or (r is not None and r.shape == self.shape): self._is_copy = None return # a custom message if isinstance(self._is_copy, str): t = self._is_copy elif t == "referent": t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" "See the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) else: t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value " "instead\n\nSee the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) if value == "raise": raise com.SettingWithCopyError(t) elif value == "warn": warnings.warn(t, com.SettingWithCopyWarning, stacklevel=find_stack_level()) def __delitem__(self, key) -> None: """ Delete item """ deleted = False maybe_shortcut = False if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: # By using engine's __contains__ we effectively # restrict to same-length tuples maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: # Allow shorthand to delete all columns whose first len(key) # elements match key: if not isinstance(key, tuple): key = (key,) for col in self.columns: if isinstance(col, tuple) and col[: len(key)] == key: del self[col] deleted = True 
if not deleted: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) # delete from the caches try: del self._item_cache[key] except KeyError: pass # ---------------------------------------------------------------------- # Unsorted @final def _check_inplace_and_allows_duplicate_labels(self, inplace): if inplace and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'inplace=True' when " "'self.flags.allows_duplicate_labels' is False." ) @final def get(self, key, default=None): """ Get item from object for given key (ex: DataFrame column). Returns default value if not found. Parameters ---------- key : object Returns ------- value : same type as items contained in object Examples -------- >>> df = pd.DataFrame( ... [ ... [24.3, 75.7, "high"], ... [31, 87.8, "high"], ... [22, 71.6, "medium"], ... [35, 95, "medium"], ... ], ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"], ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"), ... ) >>> df temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df.get(["temp_celsius", "windspeed"]) temp_celsius windspeed 2014-02-12 24.3 high 2014-02-13 31.0 high 2014-02-14 22.0 medium 2014-02-15 35.0 medium If the key isn't found, the default value will be used. >>> df.get(["temp_celsius", "temp_kelvin"], default="default_value") 'default_value' """ try: return self[key] except (KeyError, ValueError, IndexError): return default @final @property def _is_view(self) -> bool_t: """Return boolean indicating if self is view of another array""" return self._mgr.is_view @final def reindex_like( self: NDFrameT, other, method: str | None = None, copy: bool_t = True, limit=None, tolerance=None, ) -> NDFrameT: """ Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. 
Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... '2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium """ d = other._construct_axes_dict( axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance, ) return self.reindex(**d) def drop( self, labels=None, axis=0, index=None, columns=None, level=None, inplace: bool_t = False, errors: str = "raise", ): inplace = validate_bool_kwarg(inplace, "inplace") if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes, _ = self._construct_axes_from_arguments((index, columns), {}) else: raise ValueError( "Need to specify at least one of 'labels', 'index' or 'columns'" ) obj = self for axis, labels in axes.items(): if labels is not None: obj = obj._drop_axis(labels, axis, level=level, errors=errors) if inplace: self._update_inplace(obj) else: return obj @final def _drop_axis( self: NDFrameT, labels, axis, level=None, errors: str = "raise", only_slice: bool_t = False, ) -> NDFrameT: """ Drop labels from specified axis. Used in the ``drop`` method internally. Parameters ---------- labels : single label or list-like axis : int or axis name level : int or level name, default None For MultiIndex errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. only_slice : bool, default False Whether indexing along columns should be view-only. 
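For example, with a non-unique index ``['a', 'b', 'b', 'c']``, dropping ``'b'`` removes both matching rows: the non-unique branch below builds a boolean mask via ``~axis.isin(labels)`` rather than a positional indexer.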
""" axis_num = self._get_axis_number(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) indexer = axis.get_indexer(new_axis) # Case for non-unique axis else: is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) labels = ensure_object(com.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") mask = ~axis.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == "raise" and mask.all(): raise KeyError(f"{labels} not found in axis") elif ( isinstance(axis, MultiIndex) and labels.dtype == "object" and not is_tuple_labels ): # Set level to zero in case of MultiIndex and label is string, # because isin can't handle strings for MultiIndexes GH#36293 # In case of tuples we get dtype object but have to use isin GH#42771 mask = ~axis.get_level_values(0).isin(labels) else: mask = ~axis.isin(labels) # Check if label doesn't exist along axis labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == "raise" and labels_missing: raise KeyError(f"{labels} not found in axis") if is_extension_array_dtype(mask.dtype): # GH#45860 mask = mask.to_numpy(dtype=bool) indexer = mask.nonzero()[0] new_axis = axis.take(indexer) bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=bm_axis, allow_dups=True, only_slice=only_slice, ) result = self._constructor(new_mgr) if self.ndim == 1: result.name = self.name return result.__finalize__(self) @final def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: """ Replace self internals with result. Parameters ---------- result : same type as self verify_is_copy : bool, default True Provide is_copy checks. """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() self._clear_item_cache() self._mgr = result._mgr self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) @final def add_prefix(self: NDFrameT, prefix: str) -> NDFrameT: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = functools.partial("{prefix}{}".format, prefix=prefix) mapper = {self._info_axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" return self._rename(**mapper) # type: ignore[return-value, arg-type] @final def add_suffix(self: NDFrameT, suffix: str) -> NDFrameT: """ Suffix labels with string `suffix`. 
For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = functools.partial("{}{suffix}".format, suffix=suffix) mapper = {self._info_axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" return self._rename(**mapper) # type: ignore[return-value, arg-type] def sort_values( self, axis=0, ascending=True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool_t = False, key: ValueKeyFunc = None, ): """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` and `stable` are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 1.0.0 key : callable, optional Apply the key function to the values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return a Series with the same shape as the input. It will be applied to each column in `by` independently. .. versionadded:: 1.1.0 Returns ------- DataFrame or None DataFrame with sorted values or None if ``inplace=True``. See Also -------- DataFrame.sort_index : Sort a DataFrame by the index. Series.sort_values : Similar method for a Series. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] ... 
}) >>> df col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 col4 1 A 1 1 B 0 A 2 0 a 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 col4 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B 3 NaN 8 4 D Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 col4 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B Sorting with a key function >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Natural sort with the key argument, using the `natsort <https://github.com/SethMMorton/natsort>` package. >>> df = pd.DataFrame({ ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], ... "value": [10, 20, 30, 40, 50] ... }) >>> df time value 0 0hr 10 1 128hr 20 2 72hr 30 3 48hr 40 4 96hr 50 >>> from natsort import index_natsorted >>> df.sort_values( ... by="time", ... key=lambda x: np.argsort(index_natsorted(df["time"])) ... ) time value 0 0hr 10 3 48hr 40 2 72hr 30 4 96hr 50 1 128hr 20 """ raise AbstractMethodError(self) def sort_index( self, axis=0, level=None, ascending: bool_t | int | Sequence[bool_t | int] = True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", sort_remaining: bool_t = True, ignore_index: bool_t = False, key: IndexKeyFunc = None, ): inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) ascending = validate_ascending(ascending) target = self._get_axis(axis) indexer = get_indexer_indexer( target, level, ascending, kind, na_position, sort_remaining, key ) if indexer is None: if inplace: result = self else: result = self.copy() if ignore_index: result.index = default_index(len(self)) if inplace: return else: return result baxis = self._get_block_manager_axis(axis) new_data = self._mgr.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic()) if ignore_index: axis = 1 if isinstance(self, ABCDataFrame) else 0 new_data.set_axis(axis, default_index(len(indexer))) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="sort_index") @doc( klass=_shared_doc_kwargs["klass"], axes=_shared_doc_kwargs["axes"], optional_labels="", optional_axis="", ) def reindex(self: NDFrameT, *args, **kwargs) -> NDFrameT: """ Conform {klass} to new index with optional filling logic. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- {optional_labels} {axes} : array-like, optional New labels / index to conform to, should be specified using keywords. Preferably an Index object to avoid duplicating data. {optional_axis} method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: Propagate last valid observation forward to next valid. * backfill / bfill: Use next valid observation to fill gap. 
* nearest: Use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. limit : int, default None Maximum number of consecutive elements to forward or backward fill. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- {klass} with changed index. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={{'index', 'columns'}}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. >>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, ...
index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. >>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more. """ # TODO: Decide if we care about having different examples for different # kinds # construct the args axes, kwargs = self._construct_axes_from_arguments(args, kwargs) method = missing.clean_reindex_fill_method(kwargs.pop("method", None)) level = kwargs.pop("level", None) copy = kwargs.pop("copy", True) limit = kwargs.pop("limit", None) tolerance = kwargs.pop("tolerance", None) fill_value = kwargs.pop("fill_value", None) # Series.reindex doesn't use / need the axis kwarg # We pop and ignore it here, to make writing Series/Frame generic code # easier kwargs.pop("axis", None) if kwargs: raise TypeError( "reindex() got an unexpected keyword " f'argument "{list(kwargs.keys())[0]}"' ) self._consolidate_inplace() # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if all( self._get_axis(axis).identical(ax) for axis, ax in axes.items() if ax is not None ): if copy: return self.copy() return self # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): return self._reindex_multi(axes, copy, fill_value) # perform the reindex on the axes return self._reindex_axes( axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self, method="reindex") def _reindex_axes( self: NDFrameT, axes, level, limit, tolerance, method, fill_value, copy ) -> NDFrameT: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) new_index, indexer = ax.reindex( labels, level=level, limit=limit, tolerance=tolerance, method=method ) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False, ) # If we've made a copy once, no need to make another one copy = False return obj def _needs_reindex_multi(self, axes, method, level) -> bool_t: """Check if we do need a multi reindex.""" return ( (com.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is 
None and not self._is_mixed_type ) def _reindex_multi(self, axes, copy, fill_value): raise AbstractMethodError(self) @final def _reindex_with_indexers( self: NDFrameT, reindexers, fill_value=None, copy: bool_t = False, allow_dups: bool_t = False, ) -> NDFrameT: """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated new_data = self._mgr for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_platform_int(indexer) # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi) new_data = new_data.reindex_indexer( index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy, ) # If we've made a copy once, no need to make another one copy = False if copy and new_data is self._mgr: new_data = new_data.copy() return self._constructor(new_data).__finalize__(self) def filter( self: NDFrameT, items=None, like: str | None = None, regex: str | None = None, axis=None, ) -> NDFrameT: """ Subset the dataframe rows or columns according to the specified index labels. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : str Keep labels from axis for which "like in label == True". regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : {0 or ‘index’, 1 or ‘columns’, None}, default None The axis to filter on, expressed either as an index (int) or axis name (str). By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ nkw = com.count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) return self.reindex(**{name: [r for r in items if r in labels]}) elif like: def f(x) -> bool_t: assert like is not None # needed for mypy return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x) -> bool_t: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError("Must pass either `items`, `like`, or `regex`") @final def head(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. 
It is useful for quickly testing if your object has the right type of data in it. For negative values of `n`, this function returns all rows except the last `n` rows, equivalent to ``df[:-n]``. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- same type as caller The first `n` rows of the caller object. See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon For negative values of `n` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot """ return self.iloc[:n] @final def tail(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `n` rows, equivalent to ``df[n:]``. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if n == 0: return self.iloc[0:0] return self.iloc[-n:] @final def sample( self: NDFrameT, n: int | None = None, frac: float | None = None, replace: bool_t = False, weights=None, random_state: RandomState | None = None, axis: Axis | None = None, ignore_index: bool_t = False, ) -> NDFrameT: """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional If int, array-like, or BitGenerator, seed for random number generator. 
If np.random.RandomState or np.random.Generator, use as given. .. versionchanged:: 1.1.0 array-like and BitGenerator object now passed to np.random.RandomState() as seed .. versionchanged:: 1.4.0 np.random.Generator objects now accepted axis : {0 or ‘index’, 1 or ‘columns’, None}, default None Axis to sample. Accepts axis number or name. Default is stat axis for given data type (0 for Series and DataFrames). ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.3.0 Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- DataFrameGroupBy.sample: Generates random samples from each group of a DataFrame object. SeriesGroupBy.sample: Generates random samples from each group of a Series object. numpy.random.choice: Generates a random sample from a given 1-D numpy array. Notes ----- If `frac` > 1, `replace` should be set to `True`. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 An upsampled sample of the ``DataFrame`` with replacement: Note that the `replace` parameter has to be `True` for `frac` > 1. >>> df.sample(frac=2, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 falcon 2 2 10 falcon 2 2 10 fish 0 0 8 dog 4 0 2 fish 0 0 8 dog 4 0 2 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ # noqa:E501 if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) obj_len = self.shape[axis] # Process random_state argument rs = com.random_state(random_state) size = sample.process_sampling_size(n, frac, replace) if size is None: assert frac is not None size = round(frac * obj_len) if weights is not None: weights = sample.preprocess_weights(self, weights, axis) sampled_indices = sample.sample(obj_len, size, replace, weights, rs) result = self.take(sampled_indices, axis=axis) if ignore_index: result.index = default_index(len(result)) return result @final @doc(klass=_shared_doc_kwargs["klass"]) def pipe( self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs, ) -> T: r""" Apply chainable functions that expect Series or DataFrames. Parameters ---------- func : function Function to apply to the {klass}. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the {klass}. args : iterable, optional Positional arguments passed into ``func``. kwargs : mapping, optional A dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``.
See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. Instead of writing >>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP You can write >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe(func, arg2=b, arg3=c) ... ) # doctest: +SKIP If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``arg2``: >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... ) # doctest: +SKIP """ return com.pipe(self, func, *args, **kwargs) # ---------------------------------------------------------------------- # Attribute access @final def __finalize__( self: NDFrameT, other, method: str | None = None, **kwargs ) -> NDFrameT: """ Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where ``__finalize__`` was called. .. warning:: The value passed as `method` is not currently considered stable across pandas releases. """ if isinstance(other, NDFrame): for name in other.attrs: self.attrs[name] = other.attrs[name] self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": attrs = other.objs[0].attrs check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) if check_attrs: for name in attrs: self.attrs[name] = attrs[name] allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs ) self.flags.allows_duplicate_labels = allows_duplicate_labels return self def __getattr__(self, name: str): """ After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). if ( name not in self._internal_names_set and name not in self._metadata and name not in self._accessors and self._info_axis._can_hold_identifiers_and_holds_name(name) ): return self[name] return object.__getattribute__(self, name) def __setattr__(self, name: str, value) -> None: """ After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass # if this fails, go on to more involved attribute setting # (note that this matches __getattr__, above).
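# Informal summary of the dispatch below (descriptive only): internal
# names, ``_metadata`` entries, and existing attributes such as the axes
# are set as plain attributes; a name matching an existing column label
# is routed through ``self[name] = value`` so the column is updated;
# anything else becomes a regular instance attribute, with a warning for
# list-likes assigned on a DataFrame, since that usually signals an
# attempted column creation.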
if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif name in self._info_axis: self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if isinstance(self, ABCDataFrame) and (is_list_like(value)): warnings.warn( "Pandas doesn't allow columns to be " "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", stacklevel=find_stack_level(), ) object.__setattr__(self, name, value) @final def _dir_additions(self) -> set[str]: """ Add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first level values are used. """ additions = super()._dir_additions() if self._info_axis._can_hold_strings: additions.update(self._info_axis._dir_additions_for_owner) return additions # ---------------------------------------------------------------------- # Consolidation of internals @final def _protect_consolidate(self, f): """ Consolidate _mgr -- if the blocks have changed, then clear the cache """ if isinstance(self._mgr, (ArrayManager, SingleArrayManager)): return f() blocks_before = len(self._mgr.blocks) result = f() if len(self._mgr.blocks) != blocks_before: self._clear_item_cache() return result @final def _consolidate_inplace(self) -> None: """Consolidate data in place and return None""" def f(): self._mgr = self._mgr.consolidate() self._protect_consolidate(f) @final def _consolidate(self): """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Returns ------- consolidated : same type as caller """ f = lambda: self._mgr.consolidate() cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self) @final @property def _is_mixed_type(self) -> bool_t: if self._mgr.is_single_block: return False if self._mgr.any_extension_types: # Even if they have the same dtype, we can't consolidate them, # so we pretend this is "mixed" return True return self.dtypes.nunique() > 1 @final def _check_inplace_setting(self, value) -> bool_t: """check whether we allow in-place setting with this type of value""" if self._is_mixed_type and not self._mgr.is_numeric_mixed_type: # allow an actual np.nan through if is_float(value) and np.isnan(value): return True raise TypeError( "Cannot do inplace boolean setting on " "mixed-types with a non np.nan value" ) return True @final def _get_numeric_data(self): return self._constructor(self._mgr.get_numeric_data()).__finalize__(self) @final def _get_bool_data(self): return self._constructor(self._mgr.get_bool_data()).__finalize__(self) # ---------------------------------------------------------------------- # Internal Interface Methods @property def values(self) -> np.ndarray: raise AbstractMethodError(self) @property def _values(self) -> np.ndarray: """internal implementation""" raise AbstractMethodError(self) @property def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ...
'datetime': [pd.Timestamp('20180310')], ... 'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ data = self._mgr.get_dtypes() return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) def astype( self: NDFrameT, dtype, copy: bool_t = True, errors: str = "raise" ) -> NDFrameT: """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Notes ----- .. deprecated:: 1.3.0 Using ``astype`` to convert from timezone-naive dtype to timezone-aware dtype is deprecated and will raise in a future version. Use :meth:`Series.dt.tz_localize` instead. Examples -------- Create a DataFrame: >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df.dtypes col1 int64 col2 int64 dtype: object Cast all columns to int32: >>> df.astype('int32').dtypes col1 int32 col2 int32 dtype: object Cast col1 to int32 using a dictionary: >>> df.astype({'col1': 'int32'}).dtypes col1 int32 col2 int64 dtype: object Create a series: >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int64): [1, 2] Convert to ordered categorical type with custom ordering: >>> from pandas.api.types import CategoricalDtype >>> cat_dtype = CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Note that using ``copy=False`` and changing data on a new pandas object may propagate changes: >>> s1 = pd.Series([1, 2]) >>> s2 = s1.astype('int64', copy=False) >>> s2[0] = 10 >>> s1 # note that s1[0] has changed too 0 10 1 2 dtype: int64 Create a series of dates: >>> ser_date = pd.Series(pd.date_range('20200101', periods=3)) >>> ser_date 0 2020-01-01 1 2020-01-02 2 2020-01-03 dtype: datetime64[ns] """ if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError( "Only the Series name can be used for " "the key in Series dtype mappings." ) new_type = dtype[self.name] return self.astype(new_type, copy, errors) # GH#44417 cast to Series so we can use .iat below, which will be # robust in case we from pandas import Series dtype_ser = Series(dtype, dtype=object) for col_name in dtype_ser.index: if col_name not in self: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument. " f"'{col_name}' not found in columns." 
) dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) results = [] for i, (col_name, col) in enumerate(self.items()): cdt = dtype_ser.iat[i] if isna(cdt): res_col = col.copy() if copy else col else: res_col = col.astype(dtype=cdt, copy=copy, errors=errors) results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names # TODO(EA2D): special case not needed with 2D EAs results = [ self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns)) ] else: # else, only a single dtype is given new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self, method="astype") # GH 33113: handle empty frame or series if not results: return self.copy() # GH 19920: retain column metadata after concat result = concat(results, axis=1, copy=False) # GH#40810 retain subclass # error: Incompatible types in assignment # (expression has type "NDFrameT", variable has type "DataFrame") result = self._constructor(result) # type: ignore[assignment] result.columns = self.columns result = result.__finalize__(self, method="astype") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) @final def copy(self: NDFrameT, deep: bool_t = True) -> NDFrameT: """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- copy : Series or DataFrame Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Since pandas is not thread safe, see the :ref:`gotchas <gotchas.thread-safety>` when copying in a threading environment. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. 
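        (Rebinding an axis on the shallow copy does not touch the original --
        only in-place mutation of the shared data is visible on both objects.
        A small sketch of this distinction, assuming the default block-based
        storage:)

        >>> shallow.index = ["x", "y"]
        >>> s.index
        Index(['a', 'b'], dtype='object')
        >>> shallow.index = ["a", "b"]  # restore for the example below

        For example: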
>>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._mgr.copy(deep=deep) self._clear_item_cache() return self._constructor(data).__finalize__(self, method="copy") @final def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT: return self.copy(deep=deep) @final def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) @final def _convert( self: NDFrameT, datetime: bool_t = False, numeric: bool_t = False, timedelta: bool_t = False, ) -> NDFrameT: """ Attempt to infer better dtype for object columns. Parameters ---------- datetime : bool, default False If True, convert to date where possible. numeric : bool, default False If True, attempt to convert to numbers (including strings), with unconvertible values becoming NaN. timedelta : bool, default False If True, convert to timedelta where possible. Returns ------- converted : same as input object """ validate_bool_kwarg(datetime, "datetime") validate_bool_kwarg(numeric, "numeric") validate_bool_kwarg(timedelta, "timedelta") return self._constructor( self._mgr.convert( datetime=datetime, numeric=numeric, timedelta=timedelta, copy=True, ) ).__finalize__(self) @final def infer_objects(self: NDFrameT) -> NDFrameT: """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. Returns ------- converted : same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. convert_dtypes : Convert argument to best possible dtype. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ # numeric=False necessary to only soft convert; # python objects will still be converted to # native numpy numeric types return self._constructor( self._mgr.convert(datetime=True, numeric=False, timedelta=True, copy=True) ).__finalize__(self, method="infer_objects") @final def convert_dtypes( self: NDFrameT, infer_objects: bool_t = True, convert_string: bool_t = True, convert_integer: bool_t = True, convert_boolean: bool_t = True, convert_floating: bool_t = True, ) -> NDFrameT: """ Convert columns to best possible dtypes using dtypes supporting ``pd.NA``. .. versionadded:: 1.0.0 Parameters ---------- infer_objects : bool, default True Whether object dtypes should be converted to the best possible types. convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True Whether, if possible, conversion can be done to integer extension types. convert_boolean : bool, defaults True Whether object dtypes should be converted to ``BooleanDtypes()``. 
        convert_floating : bool, default True
            Whether, if possible, conversion can be done to floating extension
            types. If `convert_integer` is also True, preference will be given
            to integer dtypes if the floats can be faithfully cast to integers.

            .. versionadded:: 1.2.0

        Returns
        -------
        Series or DataFrame
            Copy of input object with new dtype.

        See Also
        --------
        infer_objects : Infer dtypes of objects.
        to_datetime : Convert argument to datetime.
        to_timedelta : Convert argument to timedelta.
        to_numeric : Convert argument to a numeric type.

        Notes
        -----
        By default, ``convert_dtypes`` will attempt to convert a Series (or each
        Series in a DataFrame) to dtypes that support ``pd.NA``. By using the
        options ``convert_string``, ``convert_integer``, ``convert_boolean`` and
        ``convert_floating``, it is possible to turn off individual conversions
        to ``StringDtype``, the integer extension types, ``BooleanDtype``
        or floating extension types, respectively.

        For object-dtyped columns, if ``infer_objects`` is ``True``, use the
        inference rules as during normal Series/DataFrame construction.  Then, if
        possible, convert to ``StringDtype``, ``BooleanDtype`` or an appropriate
        integer or floating extension type, otherwise leave as ``object``.

        If the dtype is integer, convert to an appropriate integer extension type.

        If the dtype is numeric, and consists of all integers, convert to an
        appropriate integer extension type. Otherwise, convert to an
        appropriate floating extension type.

        .. versionchanged:: 1.2
            Starting with pandas 1.2, this method also converts float columns
            to the nullable floating extension type.

        In the future, as new dtypes are added that support ``pd.NA``, the results
        of this method will change to support those new dtypes.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...     {
        ...         "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
        ...         "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
        ...         "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
        ...         "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
        ...         "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
        ...         "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
        ...     }
        ... )

        Start with a DataFrame with default dtypes.

        >>> df
           a  b      c    d     e      f
        0  1  x   True    h  10.0    NaN
        1  2  y  False    i   NaN  100.5
        2  3  z    NaN  NaN  20.0  200.0

        >>> df.dtypes
        a      int32
        b     object
        c     object
        d     object
        e    float64
        f    float64
        dtype: object

        Convert the DataFrame to use best possible dtypes.

        >>> dfn = df.convert_dtypes()
        >>> dfn
           a  b      c     d     e      f
        0  1  x   True     h    10   <NA>
        1  2  y  False     i  <NA>  100.5
        2  3  z   <NA>  <NA>    20  200.0

        >>> dfn.dtypes
        a      Int32
        b     string
        c    boolean
        d     string
        e      Int64
        f    Float64
        dtype: object

        Start with a Series of strings and missing data represented by
        ``np.nan``.

        >>> s = pd.Series(["a", "b", np.nan])
        >>> s
        0      a
        1      b
        2    NaN
        dtype: object

        Obtain a Series with dtype ``StringDtype``.
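        (If you only want to opt out of one conversion, pass the corresponding
        flag. A sketch, assuming the NaN entry is left untouched when the
        column keeps its ``object`` dtype; the other flags behave analogously:)

        >>> s.convert_dtypes(convert_string=False)
        0      a
        1      b
        2    NaN
        dtype: object

        With the defaults: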
>>> s.convert_dtypes() 0 a 1 b 2 <NA> dtype: string """ if self.ndim == 1: return self._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, ) else: results = [ col._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, ) for col_name, col in self.items() ] if len(results) > 0: result = concat(results, axis=1, copy=False, keys=self.columns) cons = cast(Type["DataFrame"], self._constructor) result = cons(result) result = result.__finalize__(self, method="convert_dtypes") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) else: return self.copy() # ---------------------------------------------------------------------- # Filling NA's @doc(**_shared_doc_kwargs) def fillna( self: NDFrameT, value=None, method=None, axis=None, inplace: bool_t = False, limit=None, downcast=None, ) -> NDFrameT | None: """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use next valid observation to fill gap. axis : {axes_single_arg} Axis along which to fill missing values. inplace : bool, default False If True, fill in-place. Note: this will modify any other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. See Also -------- interpolate : Fill NaN values using interpolation. reindex : Conform object to new index. asfreq : Convert TimeSeries to specified frequency. Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, np.nan], ... [np.nan, 3, np.nan, 4]], ... columns=list("ABCD")) >>> df A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 NaN NaN NaN NaN 3 NaN 3.0 NaN 4.0 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 0.0 3 0.0 3.0 0.0 4.0 We can also propagate non-null values forward or backward. >>> df.fillna(method="ffill") A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 3.0 4.0 NaN 1.0 3 3.0 3.0 NaN 4.0 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 2.0 1.0 2 0.0 1.0 2.0 3.0 3 0.0 3.0 2.0 4.0 Only replace the first NaN element. 
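        (``limit`` also applies when filling with a ``method`` -- at most that
        many consecutive NaNs are filled in each run. A sketch, reusing the
        frame above:)

        >>> df.fillna(method="ffill", limit=1)
             A    B    C    D
        0  NaN  2.0  NaN  0.0
        1  3.0  4.0  NaN  1.0
        2  3.0  4.0  NaN  1.0
        3  NaN  3.0  NaN  4.0

        With a dict ``value``, ``limit`` instead caps the number of fills per
        column: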
        >>> df.fillna(value=values, limit=1)
             A    B    C    D
        0  0.0  2.0  2.0  0.0
        1  3.0  4.0  NaN  1.0
        2  NaN  1.0  NaN  3.0
        3  NaN  3.0  NaN  4.0

        When filling using a DataFrame, replacement happens along
        the same column names and same indices

        >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE"))
        >>> df.fillna(df2)
             A    B    C    D
        0  0.0  2.0  0.0  0.0
        1  3.0  4.0  0.0  1.0
        2  0.0  0.0  0.0  NaN
        3  0.0  3.0  0.0  4.0

        Note that column D is not affected since it is not present in df2.
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        value, method = validate_fillna_kwargs(value, method)

        self._consolidate_inplace()

        # set the default here, so functions examining the signature
        # can detect if something was set (e.g. in groupby) (GH9221)
        if axis is None:
            axis = 0
        axis = self._get_axis_number(axis)

        if value is None:
            if not self._mgr.is_single_block and axis == 1:
                if inplace:
                    raise NotImplementedError()
                result = self.T.fillna(method=method, limit=limit).T

                return result

            new_data = self._mgr.interpolate(
                method=method,
                axis=axis,
                limit=limit,
                inplace=inplace,
                downcast=downcast,
            )
        else:
            if self.ndim == 1:
                if isinstance(value, (dict, ABCSeries)):
                    if not len(value):
                        # test_fillna_nonscalar
                        if inplace:
                            return None
                        return self.copy()
                    value = create_series_with_explicit_dtype(
                        value, dtype_if_empty=object
                    )
                    value = value.reindex(self.index, copy=False)
                    value = value._values
                elif not is_list_like(value):
                    pass
                else:
                    raise TypeError(
                        '"value" parameter must be a scalar, dict '
                        "or Series, but you passed a "
                        f'"{type(value).__name__}"'
                    )

                new_data = self._mgr.fillna(
                    value=value, limit=limit, inplace=inplace, downcast=downcast
                )

            elif isinstance(value, (dict, ABCSeries)):
                if axis == 1:
                    raise NotImplementedError(
                        "Currently only can fill "
                        "with dict/Series column "
                        "by column"
                    )

                result = self if inplace else self.copy()
                is_dict = isinstance(downcast, dict)
                for k, v in value.items():
                    if k not in result:
                        continue
                    downcast_k = downcast if not is_dict else downcast.get(k)
                    result[k] = result[k].fillna(v, limit=limit, downcast=downcast_k)
                return result if not inplace else None

            elif not is_list_like(value):
                if not self._mgr.is_single_block and axis == 1:

                    result = self.T.fillna(value=value, limit=limit).T

                    new_data = result
                else:

                    new_data = self._mgr.fillna(
                        value=value, limit=limit, inplace=inplace, downcast=downcast
                    )
            elif isinstance(value, ABCDataFrame) and self.ndim == 2:

                new_data = self.where(self.notna(), value)._mgr
            else:
                raise ValueError(f"invalid fill value with a {type(value)}")

        result = self._constructor(new_data)
        if inplace:
            return self._update_inplace(result)
        else:
            return result.__finalize__(self, method="fillna")

    @doc(klass=_shared_doc_kwargs["klass"])
    def ffill(
        self: NDFrameT,
        axis: None | Axis = None,
        inplace: bool_t = False,
        limit: None | int = None,
        downcast=None,
    ) -> NDFrameT | None:
        """
        Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.

        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        """
        return self.fillna(
            method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
        )

    pad = ffill

    @doc(klass=_shared_doc_kwargs["klass"])
    def bfill(
        self: NDFrameT,
        axis: None | Axis = None,
        inplace: bool_t = False,
        limit: None | int = None,
        downcast=None,
    ) -> NDFrameT | None:
        """
        Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.

        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
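        Examples
        --------
        A small sketch of backward filling on a Series:

        >>> s = pd.Series([None, 2.0, None, 4.0])
        >>> s.bfill()
        0    2.0
        1    2.0
        2    4.0
        3    4.0
        dtype: float64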
""" return self.fillna( method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) backfill = bfill @doc( _shared_docs["replace"], klass=_shared_doc_kwargs["klass"], inplace=_shared_doc_kwargs["inplace"], replace_iloc=_shared_doc_kwargs["replace_iloc"], ) def replace( self, to_replace=None, value=lib.no_default, inplace: bool_t = False, limit: int | None = None, regex=False, method=lib.no_default, ): if not ( is_scalar(to_replace) or is_re_compilable(to_replace) or is_list_like(to_replace) ): raise TypeError( "Expecting 'to_replace' to be either a scalar, array-like, " "dict or None, got invalid type " f"{repr(type(to_replace).__name__)}" ) inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") self._consolidate_inplace() if value is lib.no_default or method is not lib.no_default: # GH#36984 if the user explicitly passes value=None we want to # respect that. We have the corner case where the user explicitly # passes value=None *and* a method, which we interpret as meaning # they want the (documented) default behavior. if method is lib.no_default: # TODO: get this to show up as the default in the docs? method = "pad" # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): if isinstance(self, ABCDataFrame): from pandas import Series result = self.apply( Series._replace_single, args=(to_replace, method, inplace, limit), ) if inplace: return return result self = cast("Series", self) return self._replace_single(to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return return self.copy() if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = { col: (to_rep, value) for col, to_rep in to_replace.items() } return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): if not is_list_like(value): # e.g. 
to_replace = [NA, ''] and value is 0, # so we replace NA with 0 and then replace '' with 0 value = [value] * len(to_replace) # e.g. we have to_replace = [NA, ''] and value = [0, 'missing'] if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. " f"Expecting {len(to_replace)} got {len(value)} " ) new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace( regex, value, inplace=inplace, limit=limit, regex=True ) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and " "non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 regex = should_use_regex(regex, to_replace) if regex: new_data = self._mgr.replace_regex( to_replace=to_replace, value=value, inplace=inplace, ) else: new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace") def interpolate( self: NDFrameT, method: str = "linear", axis: Axis = 0, limit: int | None = None, inplace: bool_t = False, limit_direction: str | None = None, limit_area: str | None = None, downcast: str | None = None, **kwargs, ) -> NDFrameT | None: """ Fill NaN values using an interpolation method. Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`. These methods use the numerical values of the index. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. If limit is specified: * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. 
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be 'backwards'. If 'limit' is not specified: * If 'method' is 'backfill' or 'bfill', the default is 'backward' * else the default is 'forward' .. versionchanged:: 1.1.0 raises ValueError if `limit_direction` is 'forward' or 'both' and method is 'backfill' or 'bfill'. raises ValueError if `limit_direction` is 'backward' or 'both' and method is 'pad' or 'ffill'. limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. * ``None``: No fill restriction. * 'inside': Only fill NaNs surrounded by valid values (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function. Returns ------- Series or DataFrame or None Returns the same object type as the caller, interpolated at some or all ``NaN`` values or None if ``inplace=True``. See Also -------- fillna : Fill missing values using different methods. scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials (Akima interpolator). scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the Bernstein basis. scipy.interpolate.interp1d : Interpolate a 1-D function. scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh interpolator). scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic interpolation. scipy.interpolate.CubicSpline : Cubic spline data interpolator. Notes ----- The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima' methods are wrappers around the respective SciPy implementations of similar names. These use the actual numerical values of the index. For more information on their behavior, see the `SciPy documentation <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__. Examples -------- Filling in ``NaN`` in a :class:`~pandas.Series` via linear interpolation. >>> s = pd.Series([0, 1, np.nan, 3]) >>> s 0 0.0 1 1.0 2 NaN 3 3.0 dtype: float64 >>> s.interpolate() 0 0.0 1 1.0 2 2.0 3 3.0 dtype: float64 Filling in ``NaN`` in a Series by padding, but filling at most two consecutive ``NaN`` at a time. >>> s = pd.Series([np.nan, "single_one", np.nan, ... "fill_two_more", np.nan, np.nan, np.nan, ... 4.71, np.nan]) >>> s 0 NaN 1 single_one 2 NaN 3 fill_two_more 4 NaN 5 NaN 6 NaN 7 4.71 8 NaN dtype: object >>> s.interpolate(method='pad', limit=2) 0 NaN 1 single_one 2 single_one 3 fill_two_more 4 fill_two_more 5 fill_two_more 6 NaN 7 4.71 8 4.71 dtype: object Filling in ``NaN`` in a Series via polynomial interpolation or splines: Both 'polynomial' and 'spline' methods require that you also specify an ``order`` (int). >>> s = pd.Series([0, 2, np.nan, 8]) >>> s.interpolate(method='polynomial', order=2) 0 0.000000 1 2.000000 2 4.666667 3 8.000000 dtype: float64 Fill the DataFrame forward (that is, going down) along each column using linear interpolation. Note how the last entry in column 'a' is interpolated differently, because there is no entry after it to use for interpolation. Note how the first entry in column 'b' remains ``NaN``, because there is no entry before it to use for interpolation. >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), ... (np.nan, 2.0, np.nan, np.nan), ... (2.0, 3.0, np.nan, 9.0), ... (np.nan, 4.0, -4.0, 16.0)], ... 
        ...                   columns=list('abcd'))
        >>> df
             a    b    c     d
        0  0.0  NaN -1.0   1.0
        1  NaN  2.0  NaN   NaN
        2  2.0  3.0  NaN   9.0
        3  NaN  4.0 -4.0  16.0

        >>> df.interpolate(method='linear', limit_direction='forward', axis=0)
             a    b    c     d
        0  0.0  NaN -1.0   1.0
        1  1.0  2.0 -2.0   5.0
        2  2.0  3.0 -3.0   9.0
        3  2.0  4.0 -4.0  16.0

        Using polynomial interpolation.

        >>> df['d'].interpolate(method='polynomial', order=2)
        0     1.0
        1     4.0
        2     9.0
        3    16.0
        Name: d, dtype: float64
        """
        inplace = validate_bool_kwarg(inplace, "inplace")

        axis = self._get_axis_number(axis)

        fillna_methods = ["ffill", "bfill", "pad", "backfill"]
        should_transpose = axis == 1 and method not in fillna_methods

        obj = self.T if should_transpose else self

        if obj.empty:
            return self.copy()

        if method not in fillna_methods:
            axis = self._info_axis_number

        if isinstance(obj.index, MultiIndex) and method != "linear":
            raise ValueError(
                "Only `method=linear` interpolation is supported on MultiIndexes."
            )

        # Set `limit_direction` depending on `method`
        if limit_direction is None:
            limit_direction = (
                "backward" if method in ("backfill", "bfill") else "forward"
            )
        else:
            if method in ("pad", "ffill") and limit_direction != "forward":
                raise ValueError(
                    f"`limit_direction` must be 'forward' for method `{method}`"
                )
            if method in ("backfill", "bfill") and limit_direction != "backward":
                raise ValueError(
                    f"`limit_direction` must be 'backward' for method `{method}`"
                )

        if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")):
            raise TypeError(
                "Cannot interpolate with all object-dtype columns "
                "in the DataFrame. Try setting at least one "
                "column to a numeric dtype."
            )

        # create/use the index
        if method == "linear":
            # prior default
            index = Index(np.arange(len(obj.index)))
        else:
            index = obj.index
            methods = {"index", "values", "nearest", "time"}
            is_numeric_or_datetime = (
                is_numeric_dtype(index.dtype)
                or is_datetime64_any_dtype(index.dtype)
                or is_timedelta64_dtype(index.dtype)
            )
            if method not in methods and not is_numeric_or_datetime:
                raise ValueError(
                    "Index column must be numeric or datetime type when "
                    f"using {method} method other than linear. "
                    "Try setting a numeric or datetime index column before "
                    "interpolating."
                )

        if isna(index).any():
            raise NotImplementedError(
                "Interpolation with NaNs in the index "
                "has not been implemented. Try filling "
                "those NaNs before interpolating."
            )
        new_data = obj._mgr.interpolate(
            method=method,
            axis=axis,
            index=index,
            limit=limit,
            limit_direction=limit_direction,
            limit_area=limit_area,
            inplace=inplace,
            downcast=downcast,
            **kwargs,
        )

        result = self._constructor(new_data)
        if should_transpose:
            result = result.T
        if inplace:
            return self._update_inplace(result)
        else:
            return result.__finalize__(self, method="interpolate")

    # ----------------------------------------------------------------------
    # Timeseries Methods

    @final
    def asof(self, where, subset=None):
        """
        Return the last row(s) without any NaNs before `where`.

        The last row (for each element in `where`, if list) without any
        NaN is taken.
        In case of a :class:`~pandas.DataFrame`, the last row without NaN
        considering only the subset of columns (if not `None`).

        If there is no good value, NaN is returned for a Series or
        a Series of NaN values for a DataFrame.

        Parameters
        ----------
        where : date or array-like of dates
            Date(s) before which the last row(s) are returned.
        subset : str or array-like of str, default `None`
            For DataFrame, if not `None`, only use these columns to
            check for NaNs.
Returns ------- scalar, Series, or DataFrame The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like Return scalar, Series, or DataFrame. See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30 NaN 2018-02-27 09:04:30 40 NaN """ if isinstance(where, str): where = Timestamp(where) if not self.index.is_monotonic_increasing: raise ValueError("asof requires a sorted index") is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError("subset is not valid for Series") else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq) if where < start: if not is_series: return self._constructor_sliced( index=self.columns, name=where, dtype=np.float64 ) return np.nan # It's always much faster to use a *while* loop here for # Series than pre-computing all the NAs. However a # *while* loop is extremely expensive for DataFrame # so we later pre-compute all the NAs and use the same # code path whether *where* is a scalar or list. # See PR: https://github.com/pandas-dev/pandas/pull/14476 if is_series: loc = self.index.searchsorted(where, side="right") if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(1) if nulls.all(): if is_series: self = cast("Series", self) return self._constructor(np.nan, index=where, name=self.name) elif is_list: self = cast("DataFrame", self) return self._constructor(np.nan, index=where, columns=self.columns) else: self = cast("DataFrame", self) return self._constructor_sliced( np.nan, index=self.columns, name=where[0] ) locs = self.index.asof_locs(where, ~(nulls._values)) # mask the missing missing = locs == -1 data = self.take(locs) data.index = where if missing.any(): # GH#16063 only do this setting when necessary, otherwise # we'd cast e.g. 
bools to floats data.loc[missing] = np.nan return data if is_list else data.iloc[-1] # ---------------------------------------------------------------------- # Action Methods @doc(klass=_shared_doc_kwargs["klass"]) def isna(self: NDFrameT) -> NDFrameT: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is an NA value. See Also -------- {klass}.isnull : Alias of isna. {klass}.notna : Boolean inverse of isna. {klass}.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool """ return isna(self).__finalize__(self, method="isna") @doc(isna, klass=_shared_doc_kwargs["klass"]) def isnull(self: NDFrameT) -> NDFrameT: return isna(self).__finalize__(self, method="isnull") @doc(klass=_shared_doc_kwargs["klass"]) def notna(self: NDFrameT) -> NDFrameT: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to True. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to False values. Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is not an NA value. See Also -------- {klass}.notnull : Alias of notna. {klass}.isna : Boolean inverse of notna. {klass}.dropna : Omit axes labels with missing values. notna : Top-level notna. Examples -------- Show which entries in a DataFrame are not NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.notna() age born name toy 0 True False True False 1 True True True True 2 False True True True Show which entries in a Series are not NA. 
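        (``notna`` is commonly used as a boolean mask; for the frame above,
        keeping only the rows with a known age -- a sketch:)

        >>> df[df['age'].notna()]
           age       born    name        toy
        0  5.0        NaT  Alfred       None
        1  6.0 1939-05-27  Batman  Batmobile

        Likewise for a Series: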
>>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool """ return notna(self).__finalize__(self, method="notna") @doc(notna, klass=_shared_doc_kwargs["klass"]) def notnull(self: NDFrameT) -> NDFrameT: return notna(self).__finalize__(self, method="notnull") @final def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): if (lower is not None and np.any(isna(lower))) or ( upper is not None and np.any(isna(upper)) ): raise ValueError("Cannot use an NA value as a clip threshold") result = self mask = isna(self._values) with np.errstate(all="ignore"): if upper is not None: subset = self <= upper result = result.where(subset, upper, axis=None, inplace=False) if lower is not None: subset = self >= lower result = result.where(subset, lower, axis=None, inplace=False) if np.any(mask): result[mask] = np.nan if inplace: return self._update_inplace(result) else: return result @final def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) # method is self.le for upper bound and self.ge for lower bound if is_scalar(threshold) and is_number(threshold): if method.__name__ == "le": return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] # GH 40420 # Treat missing thresholds as no bounds, not clipping the values if is_list_like(threshold): fill_value = np.inf if method.__name__ == "le" else -np.inf threshold_inf = threshold.fillna(fill_value) else: threshold_inf = threshold subset = method(threshold_inf, axis=axis) | isna(self) # GH 40420 return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( self: NDFrameT, lower=None, upper=None, axis: Axis | None = None, inplace: bool_t = False, *args, **kwargs, ) -> NDFrameT | None: """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Thresholds can be singular values or array like, and in the latter case the clipping is performed element-wise in the specified axis. Parameters ---------- lower : float or array-like, default None Minimum threshold value. All values below this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. upper : float or array-like, default None Maximum threshold value. All values above this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. axis : int or str axis name, optional Align object with lower and upper along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame or None Same type as calling object with the values outside the clip boundaries replaced or None if ``inplace=True``. See Also -------- Series.clip : Trim values at input threshold in series. DataFrame.clip : Trim values at input threshold in dataframe. numpy.clip : Clip (limit) the values in an array. 
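        Notes
        -----
        A missing threshold means "no bound on that side" for the
        corresponding element, rather than clipping everything to NaN.
        A small sketch of this (here the second element has no usable
        upper bound, so it is returned as-is):

        >>> pd.Series([1, 10]).clip(upper=pd.Series([5, None]))
        0     1
        1    10
        dtype: int64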
Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3 Clips using specific lower threshold per column element, with missing values: >>> t = pd.Series([2, -4, np.NaN, 6, 3]) >>> t 0 2.0 1 -4.0 2 NaN 3 6.0 4 3.0 dtype: float64 >>> df.clip(t, axis=0) col_0 col_1 0 9 2 1 -3 -4 2 0 6 3 6 8 4 5 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = nv.validate_clip_with_axis(axis, args, kwargs) if axis is not None: axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN isna_lower = isna(lower) if not is_list_like(lower): if np.any(isna_lower): lower = None elif np.all(isna_lower): lower = None isna_upper = isna(upper) if not is_list_like(upper): if np.any(isna_upper): upper = None elif np.all(isna_upper): upper = None # GH 2747 (arguments were reversed) if ( lower is not None and upper is not None and is_scalar(lower) and is_scalar(upper) ): lower, upper = min(lower, upper), max(lower, upper) # fast-path for scalars if (lower is None or (is_scalar(lower) and is_number(lower))) and ( upper is None or (is_scalar(upper) and is_number(upper)) ): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound( lower, method=self.ge, axis=axis, inplace=inplace ) if upper is not None: if inplace: result = self result = result._clip_with_one_bound( upper, method=self.le, axis=axis, inplace=inplace ) return result @doc(**_shared_doc_kwargs) def asfreq( self: NDFrameT, freq, method=None, how: str | None = None, normalize: bool_t = False, fill_value=None, ) -> NDFrameT: """ Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index is the result of transforming the original index with :meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to ``pd.date_range(start, end, freq=freq)`` where ``start`` and ``end`` are, respectively, the first and last entries in the original index (see :func:`pandas.date_range`). The values corresponding to any timesteps in the new index which were not present in the original index will be null (``NaN``), unless a method for filling such unknowns is provided (see the ``method`` parameter below). The :meth:`resample` method is more appropriate if an operation on each group of timesteps (such as an aggregate) is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset or str Frequency DateOffset or string. method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill. 
how : {{'start', 'end'}}, default end For PeriodIndex only (see PeriodIndex.asfreq). normalize : bool, default False Whether to reset output index to midnight. fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- {klass} {klass} object reindexed to the specified frequency. See Also -------- reindex : Conform DataFrame to new index with optional filling logic. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({{'s': series}}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq( self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value, ) @final def at_time(self: NDFrameT, time, asof: bool_t = False, axis=None) -> NDFrameT: """ Select values at particular time of day (e.g., 9:30AM). Parameters ---------- time : datetime.time or str axis : {0 or 'index', 1 or 'columns'}, default 0 Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") indexer = index.indexer_at_time(time, asof=asof) return self._take_with_is_copy(indexer, axis=axis) @final def between_time( self: NDFrameT, start_time, end_time, include_start: bool_t | lib.NoDefault = lib.no_default, include_end: bool_t | lib.NoDefault = lib.no_default, inclusive: IntervalClosedType | None = None, axis=None, ) -> NDFrameT: """ Select values between particular times of the day (e.g., 9:00-9:30 AM). 
By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. include_start : bool, default True Whether the start time needs to be included in the result. .. deprecated:: 1.4.0 Arguments `include_start` and `include_end` have been deprecated to standardize boundary inputs. Use `inclusive` instead, to set each bound as closed or open. include_end : bool, default True Whether the end time needs to be included in the result. .. deprecated:: 1.4.0 Arguments `include_start` and `include_end` have been deprecated to standardize boundary inputs. Use `inclusive` instead, to set each bound as closed or open. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. Returns ------- Series or DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") old_include_arg_used = (include_start != lib.no_default) or ( include_end != lib.no_default ) if old_include_arg_used and inclusive is not None: raise ValueError( "Deprecated arguments `include_start` and `include_end` " "cannot be passed if `inclusive` has been given." 
) # If any of the deprecated arguments ('include_start', 'include_end') # have been passed elif old_include_arg_used: warnings.warn( "`include_start` and `include_end` are deprecated in " "favour of `inclusive`.", FutureWarning, stacklevel=find_stack_level(), ) left = True if isinstance(include_start, lib.NoDefault) else include_start right = True if isinstance(include_end, lib.NoDefault) else include_end inc_dict: dict[tuple[bool_t, bool_t], IntervalClosedType] = { (True, True): "both", (True, False): "left", (False, True): "right", (False, False): "neither", } inclusive = inc_dict[(left, right)] elif inclusive is None: # On arg removal inclusive can default to "both" inclusive = "both" left_inclusive, right_inclusive = validate_inclusive(inclusive) indexer = index.indexer_between_time( start_time, end_time, include_start=left_inclusive, include_end=right_inclusive, ) return self._take_with_is_copy(indexer, axis=axis) @doc(**_shared_doc_kwargs) def resample( self, rule, axis=0, closed: str | None = None, label: str | None = None, convention: str = "start", kind: str | None = None, loffset=None, base: int | None = None, on=None, level=None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, ) -> Resampler: """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or the caller must pass the label of a datetime-like series/index to the ``on``/``level`` keyword parameter. Parameters ---------- rule : DateOffset, Timedelta or str The offset string or object representing target conversion. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Which axis to use for up- or down-sampling. For `Series` this will default to 0, i.e. along the rows. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {{'start', 'end', 's', 'e'}}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. kind : {{'timestamp', 'period'}}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. loffset : timedelta, default None Adjust the resampled time labels. .. deprecated:: 1.1.0 You should add the loffset to the `df.index` after the resample. See below. base : int, default 0 For frequencies that evenly subdivide 1 day, the "origin" of the aggregated intervals. For example, for '5min' frequency, base could range from 0 through 4. Defaults to 0. .. deprecated:: 1.1.0 The new arguments that you should use are 'offset' or 'origin'. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. origin : Timestamp or str, default 'start_day' The timestamp on which to adjust the grouping. 
The timezone of origin must match the timezone of the index. If string, must be one of the following: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries .. versionadded:: 1.1.0 - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day .. versionadded:: 1.3.0 offset : Timedelta or str, default is None An offset timedelta added to the origin. .. versionadded:: 1.1.0 Returns ------- pandas.core.Resampler :class:`~pandas.core.Resampler` object. See Also -------- Series.resample : Resample a Series. DataFrame.resample : Resample a DataFrame. groupby : Group {klass} by mapping, function, label, or list of labels. asfreq : Reindex a {klass} with the given frequency without grouping. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__ for more. To learn more about the offset strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. >>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``pad`` method. >>> series.resample('30S').pad()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. 
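        (A related option is ``nearest``, which fills each new bin from the
        closest original timestamp -- a sketch, assuming midpoints resolve to
        the following value, as in the ``Resampler.nearest`` documentation:)

        >>> series.resample('30S').nearest()[0:5]
        2000-01-01 00:00:00    0
        2000-01-01 00:00:30    1
        2000-01-01 00:01:00    1
        2000-01-01 00:01:30    2
        2000-01-01 00:02:00    2
        Freq: 30S, dtype: int64

        Using ``bfill``: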
>>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(arraylike): ... return np.sum(arraylike) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df2 = pd.DataFrame( ... d2, ... index=pd.MultiIndex.from_product( ... [days, ['morning', 'afternoon']] ... ) ... 
) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 If you want to adjust the start of the bins based on a fixed timestamp: >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' >>> rng = pd.date_range(start, end, freq='7min') >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) >>> ts 2000-10-01 23:30:00 0 2000-10-01 23:37:00 3 2000-10-01 23:44:00 6 2000-10-01 23:51:00 9 2000-10-01 23:58:00 12 2000-10-02 00:05:00 15 2000-10-02 00:12:00 18 2000-10-02 00:19:00 21 2000-10-02 00:26:00 24 Freq: 7T, dtype: int64 >>> ts.resample('17min').sum() 2000-10-01 23:14:00 0 2000-10-01 23:31:00 9 2000-10-01 23:48:00 21 2000-10-02 00:05:00 54 2000-10-02 00:22:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='epoch').sum() 2000-10-01 23:18:00 0 2000-10-01 23:35:00 18 2000-10-01 23:52:00 27 2000-10-02 00:09:00 39 2000-10-02 00:26:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='2000-01-01').sum() 2000-10-01 23:24:00 3 2000-10-01 23:41:00 15 2000-10-01 23:58:00 45 2000-10-02 00:15:00 45 Freq: 17T, dtype: int64 If you want to adjust the start of the bins with an `offset` Timedelta, the two following lines are equivalent: >>> ts.resample('17min', origin='start').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', offset='23h30min').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 If you want to take the largest Timestamp as the end of the bins: >>> ts.resample('17min', origin='end').sum() 2000-10-01 23:35:00 0 2000-10-01 23:52:00 18 2000-10-02 00:09:00 27 2000-10-02 00:26:00 63 Freq: 17T, dtype: int64 In contrast with the `start_day`, you can use `end_day` to take the ceiling midnight of the largest Timestamp as the end of the bins and drop the bins not containing data: >>> ts.resample('17min', origin='end_day').sum() 2000-10-01 23:38:00 3 2000-10-01 23:55:00 15 2000-10-02 00:12:00 45 2000-10-02 00:29:00 45 Freq: 17T, dtype: int64 To replace the use of the deprecated `base` argument, you can now use `offset`, in this example it is equivalent to have `base=2`: >>> ts.resample('17min', offset='2min').sum() 2000-10-01 23:16:00 0 2000-10-01 23:33:00 9 2000-10-01 23:50:00 36 2000-10-02 00:07:00 39 2000-10-02 00:24:00 24 Freq: 17T, dtype: int64 To replace the use of the deprecated `loffset` argument: >>> from pandas.tseries.frequencies import to_offset >>> loffset = '19min' >>> ts_out = ts.resample('17min').sum() >>> ts_out.index = ts_out.index + to_offset(loffset) >>> ts_out 2000-10-01 23:33:00 0 2000-10-01 23:50:00 9 2000-10-02 00:07:00 21 2000-10-02 00:24:00 54 2000-10-02 00:41:00 24 Freq: 17T, dtype: int64 """ from pandas.core.resample import get_resampler axis = self._get_axis_number(axis) return get_resampler( self, freq=rule, label=label, closed=closed, axis=axis, kind=kind, loffset=loffset, convention=convention, base=base, key=on, level=level, origin=origin, offset=offset, ) @final def first(self: NDFrameT, offset) -> NDFrameT: """ Select initial periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. 
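# ----------------------------------------------------------------------
# Illustrative sketch, not part of the pandas source: the ``origin`` and
# ``offset`` resample arguments shown above both shift the bin grid, so
# ``origin='start'`` and an equivalent ``offset`` produce identical bins.
# Assumes pandas >= 1.1, where both keywords exist.
import numpy as np
import pandas as pd

rng = pd.date_range("2000-10-01 23:30:00", "2000-10-02 00:30:00", freq="7min")
ts = pd.Series(np.arange(len(rng)) * 3, index=rng)

by_origin = ts.resample("17min", origin="start").sum()
by_offset = ts.resample("17min", offset="23h30min").sum()
assert by_origin.equals(by_offset)  # same bin edges, hence same sums
# ----------------------------------------------------------------------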
Parameters ---------- offset : str, DateOffset or dateutil.relativedelta The offset length of the data that will be selected. For instance, '1M' will display all the rows having their index within the first month. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex`. See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice that the data for the first 3 calendar days was returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self offset = to_offset(offset) if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]): # GH#29623 if first value is end of period, remove offset with n = 1 # before adding the real offset end_date = end = self.index[0] - offset.base + offset else: end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if isinstance(offset, Tick) and end_date in self.index: end = self.index.searchsorted(end_date, side="left") return self.iloc[:end] return self.loc[:end] @final def last(self: NDFrameT, offset) -> NDFrameT: """ Select final periods of time series data based on a date offset. For a DataFrame with a sorted DatetimeIndex, this function selects the last few rows based on a date offset. Parameters ---------- offset : str, DateOffset, dateutil.relativedelta The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex`. See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice that the data for the last 3 calendar days was returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side="right") return self.iloc[start:] @final def rank( self: NDFrameT, axis=0, method: str = "average", numeric_only: bool_t | None | lib.NoDefault = lib.no_default, na_option: str = "keep", ascending: bool_t = True, pct: bool_t = False, ) -> NDFrameT: """ Compute numerical data ranks (1 through n) along axis. By default, equal values are assigned a rank that is the average of the ranks of those values.
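# ----------------------------------------------------------------------
# Illustrative sketch, not part of the pandas source: ``first``/``last``
# select by a *calendar* offset from the edge of the index, not by row
# count, which is why a '3D' window on a 2-day-spaced index returns only
# two rows in the docstrings above.
import pandas as pd

i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
print(ts.first("3D"))  # 2018-04-09 and 2018-04-11 only
print(ts.last("3D"))   # 2018-04-13 and 2018-04-15 only
# ----------------------------------------------------------------------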
Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Index to direct ranking. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' How to rank the group of records that have the same value (i.e. ties): * average: average rank of the group * min: lowest rank in the group * max: highest rank in the group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups. numeric_only : bool, optional For DataFrame objects, rank only numeric columns if set to True. na_option : {'keep', 'top', 'bottom'}, default 'keep' How to rank NaN values: * keep: assign NaN rank to NaN values * top: assign lowest rank to NaN values * bottom: assign highest rank to NaN values ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to display the returned rankings in percentile form. Returns ------- same type as caller Return a Series or DataFrame with data ranks as values. See Also -------- core.groupby.GroupBy.rank : Rank of values within each group. Examples -------- >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog', ... 'spider', 'snake'], ... 'Number_legs': [4, 2, 4, 8, np.nan]}) >>> df Animal Number_legs 0 cat 4.0 1 penguin 2.0 2 dog 4.0 3 spider 8.0 4 snake NaN Ties are assigned the mean of the ranks (by default) for the group. >>> s = pd.Series(range(5), index=list("abcde")) >>> s["d"] = s["b"] >>> s.rank() a 1.0 b 2.5 c 4.0 d 2.5 e 5.0 dtype: float64 The following example shows how the method behaves with the above parameters: * default_rank: this is the default behaviour obtained without using any parameter. * max_rank: setting ``method = 'max'`` the records that have the same values are ranked using the highest rank (e.g.: since 'cat' and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.) * NA_bottom: choosing ``na_option = 'bottom'``, if there are records with NaN values they are placed at the bottom of the ranking. * pct_rank: when setting ``pct = True``, the ranking is expressed as percentile rank. >>> df['default_rank'] = df['Number_legs'].rank() >>> df['max_rank'] = df['Number_legs'].rank(method='max') >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom') >>> df['pct_rank'] = df['Number_legs'].rank(pct=True) >>> df Animal Number_legs default_rank max_rank NA_bottom pct_rank 0 cat 4.0 2.5 3.0 2.5 0.625 1 penguin 2.0 1.0 1.0 1.0 0.250 2 dog 4.0 2.5 3.0 2.5 0.625 3 spider 8.0 4.0 4.0 4.0 1.000 4 snake NaN NaN NaN 5.0 NaN """ warned = False if numeric_only is None: # GH#45036 warnings.warn( f"'numeric_only=None' in {type(self).__name__}.rank is deprecated " "and will raise in a future version. Pass either 'True' or " "'False'. 'False' will be the default.", FutureWarning, stacklevel=find_stack_level(), ) warned = True elif numeric_only is lib.no_default: numeric_only = None axis = self._get_axis_number(axis) if na_option not in {"keep", "top", "bottom"}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): if data.ndim == 2: # i.e. DataFrame, we cast to ndarray values = data.values else: # i.e. 
Series, can dispatch to EA values = data._values if isinstance(values, ExtensionArray): ranks = values._rank( axis=axis, method=method, ascending=ascending, na_option=na_option, pct=pct, ) else: ranks = algos.rank( values, axis=axis, method=method, ascending=ascending, na_option=na_option, pct=pct, ) ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) return ranks_obj.__finalize__(self, method="rank") # if numeric_only is None, and we can't get anything, we try with # numeric_only=True if numeric_only is None: try: return ranker(self) except TypeError: numeric_only = True if not warned: # Only warn here if we didn't already issue a warning above # GH#45036 warnings.warn( f"Dropping of nuisance columns in {type(self).__name__}.rank " "is deprecated; in a future version this will raise TypeError. " "Select only valid columns before calling rank.", FutureWarning, stacklevel=find_stack_level(), ) if numeric_only: data = self._get_numeric_data() else: data = self return ranker(data) @doc(_shared_docs["compare"], klass=_shared_doc_kwargs["klass"]) def compare( self, other, align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, ): from pandas.core.reshape.concat import concat if type(self) is not type(other): cls_self, cls_other = type(self).__name__, type(other).__name__ raise TypeError( f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'" ) mask = ~((self == other) | (self.isna() & other.isna())) keys = ["self", "other"] if not keep_equal: self = self.where(mask) other = other.where(mask) if not keep_shape: if isinstance(self, ABCDataFrame): cmask = mask.any() rmask = mask.any(axis=1) self = self.loc[rmask, cmask] other = other.loc[rmask, cmask] else: self = self[mask] other = other[mask] if align_axis in (1, "columns"): # This is needed for Series axis = 1 else: axis = self._get_axis_number(align_axis) diff = concat([self, other], axis=axis, keys=keys) if axis >= self.ndim: # No need to reorganize data if stacking on new axis # This currently applies for stacking two Series on columns return diff ax = diff._get_axis(axis) ax_names = np.array(ax.names) # set index names to positions to avoid confusion ax.names = np.arange(len(ax_names)) # bring self-other to inner level order = list(range(1, ax.nlevels)) + [0] if isinstance(diff, ABCDataFrame): diff = diff.reorder_levels(order, axis=axis) else: diff = diff.reorder_levels(order) # restore the index names in order diff._get_axis(axis=axis).names = ax_names[order] # reorder axis to keep things organized indices = ( np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten() ) diff = diff.take(indices, axis=axis) return diff @doc(**_shared_doc_kwargs) def align( self, other, join="outer", axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, broadcast_axis=None, ): """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN Value to use for missing values. 
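# ----------------------------------------------------------------------
# Illustrative sketch, not part of the pandas source: ``compare`` (defined
# above) keeps only the differing cells by default, labelled
# 'self'/'other'; pass ``keep_shape=True``/``keep_equal=True`` to retain
# the full frame.
import pandas as pd

df1 = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
df2 = pd.DataFrame({"a": [1, 9], "b": [3, 4]})
print(df1.compare(df2))
#      a
#   self other
# 1  2.0   9.0
# ----------------------------------------------------------------------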
Defaults to NaN, but can be any "compatible" value. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: - pad / ffill: propagate last valid observation forward to next valid. - backfill / bfill: use NEXT valid observation to fill gap. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. fill_axis : {axes_single_arg}, default 0 Filling axis, method and limit. broadcast_axis : {axes_single_arg}, default None Broadcast values along this axis, if aligning two objects of different dimensions. Returns ------- (left, right) : ({klass}, type of other) Aligned objects. Examples -------- >>> df = pd.DataFrame( ... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2] ... ) >>> other = pd.DataFrame( ... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]], ... columns=["A", "B", "C", "D"], ... index=[2, 3, 4], ... ) >>> df D B E A 1 1 2 3 4 2 6 7 8 9 >>> other A B C D 2 10 20 30 40 3 60 70 80 90 4 600 700 800 900 Align on columns: >>> left, right = df.align(other, join="outer", axis=1) >>> left A B C D E 1 4 2 NaN 1 3 2 9 7 NaN 6 8 >>> right A B C D E 2 10 20 30 40 NaN 3 60 70 80 90 NaN 4 600 700 800 900 NaN We can also align on the index: >>> left, right = df.align(other, join="outer", axis=0) >>> left D B E A 1 1.0 2.0 3.0 4.0 2 6.0 7.0 8.0 9.0 3 NaN NaN NaN NaN 4 NaN NaN NaN NaN >>> right A B C D 1 NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 3 60.0 70.0 80.0 90.0 4 600.0 700.0 800.0 900.0 Finally, the default `axis=None` will align on both index and columns: >>> left, right = df.align(other, join="outer", axis=None) >>> left A B C D E 1 4.0 2.0 NaN 1.0 3.0 2 9.0 7.0 NaN 6.0 8.0 3 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN >>> right A B C D E 1 NaN NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 NaN 3 60.0 70.0 80.0 90.0 NaN 4 600.0 700.0 800.0 900.0 NaN """ method = missing.clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: if isinstance(self, ABCSeries): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim df = cons( {c: self for c in other.columns}, **other._construct_axes_dict() ) return df._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim df = cons( {c: other for c in self.columns}, **self._construct_axes_dict() ) return self._align_frame( df, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, ABCDataFrame): return self._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): return self._align_series( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") @final def 
_align_frame( self, other, join="outer", axis=None, level=None, copy: bool_t = True, fill_value=None, method=None, limit=None, fill_axis=0, ): # defaults join_index, join_columns = None, None ilidx, iridx = None, None clidx, cridx = None, None is_series = isinstance(self, ABCSeries) if (axis is None or axis == 0) and not self.index.equals(other.index): join_index, ilidx, iridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if ( (axis is None or axis == 1) and not is_series and not self.columns.equals(other.columns) ): join_columns, clidx, cridx = self.columns.join( other.columns, how=join, level=level, return_indexers=True ) if is_series: reindexers = {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers( reindexers, copy=copy, fill_value=fill_value, allow_dups=True ) # other must be always DataFrame right = other._reindex_with_indexers( {0: [join_index, iridx], 1: [join_columns, cridx]}, copy=copy, fill_value=fill_value, allow_dups=True, ) if method is not None: _left = left.fillna(method=method, axis=fill_axis, limit=limit) assert _left is not None # needed for mypy left = _left right = right.fillna(method=method, axis=fill_axis, limit=limit) # if DatetimeIndex have different tz, convert to UTC left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) @final def _align_series( self, other, join="outer", axis=None, level=None, copy: bool_t = True, fill_value=None, method=None, limit=None, fill_axis=0, ): is_series = isinstance(self, ABCSeries) if (not is_series and axis is None) or axis not in [None, 0, 1]: raise ValueError("Must specify axis=0 or 1") if is_series and axis == 1: raise ValueError("cannot align series to a series other than axis 0") # series/series compat, other must always be a Series if not axis: # equal if self.index.equals(other.index): join_index, lidx, ridx = None, None, None else: join_index, lidx, ridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if is_series: left = self._reindex_indexer(join_index, lidx, copy) elif lidx is None or join_index is None: left = self.copy() if copy else self else: left = self._constructor( self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) ) right = other._reindex_indexer(join_index, ridx, copy) else: # one has > 1 ndim fdata = self._mgr join_index = self.axes[1] lidx, ridx = None, None if not join_index.equals(other.index): join_index, lidx, ridx = join_index.join( other.index, how=join, level=level, return_indexers=True ) if lidx is not None: bm_axis = self._get_block_manager_axis(1) fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) if copy and fdata is self._mgr: fdata = fdata.copy() left = self._constructor(fdata) if ridx is None: right = other else: right = other.reindex(join_index, level=level) # fill fill_na = notna(fill_value) or (method is not None) if fill_na: left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) right = right.fillna(fill_value, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_series or (not is_series and axis == 0): left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) @final def _where( self, cond, other=lib.no_default, inplace=False, axis=None, level=None, errors="raise", ): """ Equivalent to public method `where`, except that `other` is not applied as a function even 
if callable. Used in __setitem__. """ inplace = validate_bool_kwarg(inplace, "inplace") if axis is not None: axis = self._get_axis_number(axis) # align the cond to same shape as myself cond = com.apply_if_callable(cond, self) if isinstance(cond, NDFrame): cond, _ = cond.align(self, join="right", broadcast_axis=1, copy=False) else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict()) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not cond.empty: if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) else: for dt in cond.dtypes: if not is_bool_dtype(dt): raise ValueError(msg.format(dtype=dt)) else: # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) # try to align with other if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: _, other = self.align( other, join="left", axis=axis, level=level, fill_value=None, copy=False, ) # if we are NOT aligned, raise as we cannot where index if axis is None and not other._indexed_same(self): raise InvalidIndexError elif other.ndim < self.ndim: # TODO(EA2D): avoid object-dtype cast in EA case GH#38729 other = other._values if axis == 0: other = np.reshape(other, (-1, 1)) elif axis == 1: other = np.reshape(other, (1, -1)) other = np.broadcast_to(other, self.shape) # slice me out of the other else: raise NotImplementedError( "cannot align with a higher dimensional NDFrame" ) elif not isinstance(other, (MultiIndex, NDFrame)): # mainly just catching Index here other = extract_array(other, extract_numpy=True) if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: if self.ndim != 1: # In the ndim == 1 case we may have # other length 1, which we treat as scalar (GH#2745, GH#4192) # or len(other) == icond.sum(), which we treat like # __setitem__ (GH#3235) raise ValueError( "other must be the same shape as self when an ndarray" ) # we are the same shape, so create an actual object for alignment else: other = self._constructor(other, **self._construct_axes_dict()) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask(mask=cond, new=other, align=align) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, ) result = self._constructor(new_data) return result.__finalize__(self) @doc( klass=_shared_doc_kwargs["klass"], cond="True", cond_rev="False", name="where", name_other="mask", ) def where( self, cond, other=np.nan, inplace=False, axis=None, level=None, errors="raise", try_cast=lib.no_default, ): """ Replace values where the condition is {cond_rev}. Parameters ---------- cond : bool {klass}, array-like, or callable Where `cond` is {cond}, keep the original value. Where {cond_rev}, replace with corresponding value from `other`. 
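# ----------------------------------------------------------------------
# Illustrative sketch, not part of the pandas source: the semantics just
# described -- keep the original value where ``cond`` holds, replace the
# rest from ``other``.
import pandas as pd

s = pd.Series(range(5))
print(s.where(s > 2, other=-1))  # -1, -1, -1, 3, 4
# ----------------------------------------------------------------------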
If `cond` is callable, it is computed on the {klass} and should return boolean {klass} or array. The callable must not change input {klass} (though pandas doesn't check it). other : scalar, {klass}, or callable Entries where `cond` is {cond_rev} are replaced with corresponding value from `other`. If other is callable, it is computed on the {klass} and should return scalar or {klass}. The callable must not change input {klass} (though pandas doesn't check it). inplace : bool, default False Whether to perform the operation in place on the data. axis : int, default None Alignment axis if needed. level : int, default None Alignment level if needed. errors : str, {{'raise', 'ignore'}}, default 'raise' Note that currently this parameter won't affect the results and will always coerce to a suitable dtype. - 'raise' : allow exceptions to be raised. - 'ignore' : suppress exceptions. On error return original object. try_cast : bool, default None Try to cast the result back to the input type (if possible). .. deprecated:: 1.3.0 Manually cast back if necessary. Returns ------- Same type as caller or None if ``inplace=True``. See Also -------- :func:`DataFrame.{name_other}` : Return an object of same shape as self. Notes ----- The {name} method is an application of the if-then idiom. For each element in the calling DataFrame, if ``cond`` is ``{cond}`` the element is used; otherwise the corresponding element from the DataFrame ``other`` is used. The signature for :func:`DataFrame.where` differs from :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. For further details and examples see the ``{name}`` documentation in :ref:`indexing <indexing.where_mask>`. Examples -------- >>> s = pd.Series(range(5)) >>> s.where(s > 0) 0 NaN 1 1.0 2 2.0 3 3.0 4 4.0 dtype: float64 >>> s.mask(s > 0) 0 0.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 >>> s.where(s > 1, 10) 0 10 1 10 2 2 3 3 4 4 dtype: int64 >>> s.mask(s > 1, 10) 0 0 1 1 2 10 3 10 4 10 dtype: int64 >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B']) >>> df A B 0 0 1 1 2 3 2 4 5 3 6 7 4 8 9 >>> m = df % 3 == 0 >>> df.where(m, -df) A B 0 0 -1 1 -2 3 2 -4 -5 3 6 -7 4 -8 9 >>> df.where(m, -df) == np.where(m, df, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True >>> df.where(m, -df) == df.mask(~m, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True """ other = com.apply_if_callable(other, self) if try_cast is not lib.no_default: warnings.warn( "try_cast keyword is deprecated and will be removed in a " "future version.", FutureWarning, stacklevel=find_stack_level(), ) return self._where(cond, other, inplace, axis, level, errors=errors) @doc( where, klass=_shared_doc_kwargs["klass"], cond="False", cond_rev="True", name="mask", name_other="where", ) def mask( self, cond, other=np.nan, inplace=False, axis=None, level=None, errors="raise", try_cast=lib.no_default, ): inplace = validate_bool_kwarg(inplace, "inplace") cond = com.apply_if_callable(cond, self) if try_cast is not lib.no_default: warnings.warn( "try_cast keyword is deprecated and will be removed in a " "future version.", FutureWarning, stacklevel=find_stack_level(), ) # see gh-21891 if not hasattr(cond, "__invert__"): cond = np.array(cond) return self.where( ~cond, other=other, inplace=inplace, axis=axis, level=level, errors=errors, ) @doc(klass=_shared_doc_kwargs["klass"]) def shift( self: NDFrameT, periods=1, freq=None, axis=0, fill_value=None ) -> NDFrameT: """ Shift index by desired number of periods with an 
optional time `freq`. When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be increased using the periods and the `freq`. `freq` can be inferred when specified as "infer" as long as either freq or inferred_freq attribute is set in the index. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. freq : DateOffset, tseries.offsets, timedelta, or str, optional Offset to use from the tseries module or time rule (e.g. 'EOM'). If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. If `freq` is specified as "infer" then it will be inferred from the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. axis : {{0 or 'index', 1 or 'columns', None}}, default None Shift direction. fill_value : object, optional The scalar value to use for newly introduced missing values. The default depends on the dtype of `self`. For numeric data, ``np.nan`` is used. For datetime, timedelta, or period data, etc. :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. .. versionchanged:: 1.1.0 Returns ------- {klass} Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. tshift : Shift the time index, using the index's frequency if available. Examples -------- >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45], ... "Col2": [13, 23, 18, 33, 48], ... "Col3": [17, 27, 22, 37, 52]}}, ...
index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 """ if periods == 0: return self.copy() if freq is None: # when freq is None, data is shifted, index is not axis = self._get_axis_number(axis) new_data = self._mgr.shift( periods=periods, axis=axis, fill_value=fill_value ) return self._constructor(new_data).__finalize__(self, method="shift") # when freq is given, index is shifted, data is not index = self._get_axis(axis) if freq == "infer": freq = getattr(index, "freq", None) if freq is None: freq = getattr(index, "inferred_freq", None) if freq is None: msg = "Freq was not set in the index hence cannot be inferred" raise ValueError(msg) elif isinstance(freq, str): freq = to_offset(freq) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq != orig_freq: assert orig_freq is not None # for mypy raise ValueError( f"Given freq {freq.rule_code} does not match " f"PeriodIndex freq {orig_freq.rule_code}" ) new_ax = index.shift(periods) else: new_ax = index.shift(periods, freq) result = self.set_axis(new_ax, axis=axis) return result.__finalize__(self, method="shift") @final def slice_shift(self: NDFrameT, periods: int = 1, axis=0) -> NDFrameT: """ Equivalent to `shift` without copying data. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. .. deprecated:: 1.2.0 slice_shift is deprecated, use DataFrame/Series.shift instead. Parameters ---------- periods : int Number of periods to move, can be positive or negative. Returns ------- shifted : same type as caller Notes ----- While the `slice_shift` is faster than `shift`, you may pay for it later during alignment. """ msg = ( "The 'slice_shift' method is deprecated " "and will be removed in a future version. " "You can use DataFrame/Series.shift instead." ) warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) if periods == 0: return self if periods > 0: vslicer = slice(None, -periods) islicer = slice(periods, None) else: vslicer = slice(-periods, None) islicer = slice(None, periods) new_obj = self._slice(vslicer, axis=axis) shifted_axis = self._get_axis(axis)[islicer] new_obj.set_axis(shifted_axis, axis=axis, inplace=True) return new_obj.__finalize__(self, method="slice_shift") @final def tshift(self: NDFrameT, periods: int = 1, freq=None, axis: Axis = 0) -> NDFrameT: """ Shift the time index, using the index's frequency if available. .. deprecated:: 1.1.0 Use `shift` instead. Parameters ---------- periods : int Number of periods to move, can be positive or negative. 
freq : DateOffset, timedelta, or str, default None Increment to use from the tseries module or time rule expressed as a string (e.g. 'EOM'). axis : {0 or 'index', 1 or 'columns', None}, default 0 Corresponds to the axis that contains the Index. Returns ------- shifted : Series/DataFrame Notes ----- If freq is not specified then tries to use the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. """ warnings.warn( ( "tshift is deprecated and will be removed in a future version. " "Please use shift instead." ), FutureWarning, stacklevel=find_stack_level(), ) if freq is None: freq = "infer" return self.shift(periods, freq, axis) def truncate( self: NDFrameT, before=None, after=None, axis=None, copy: bool_t = True ) -> NDFrameT: """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, str, int Truncate all rows before this index value. after : date, str, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. copy : bool, default True Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. >>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax._is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None and before > after: raise ValueError(f"Truncate: {after} must be after {before}") if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1: before, after = after, before slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) if copy: result = result.copy() return result @final def tz_convert( self: NDFrameT, tz, axis=0, level=None, copy: bool_t = True ) -> NDFrameT: """ Convert tz-aware axis to target time zone. Parameters ---------- tz : str or tzinfo object axis : the axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. Returns ------- {klass} Object with time zone converted axis. Raises ------ TypeError If the axis is tz-naive. """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, "tz_convert"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_convert(ax, tz) result = self.copy(deep=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self, method="tz_convert") @final def tz_localize( self: NDFrameT, tz, axis=0, level=None, copy: bool_t = True, ambiguous="raise", nonexistent: str = "raise", ) -> NDFrameT: """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : str or tzinfo axis : the axis to localize level : int, str, default None If axis is a MultiIndex, localize a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled.
- 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise a NonExistentTimeError if there are nonexistent times. Returns ------- Series or DataFrame Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00'])) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), ... index=pd.DatetimeIndex(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly. >>> s = pd.Series(range(3), ... index=pd.DatetimeIndex(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backward with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', ...
'2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") if nonexistent not in nonexistent_options and not isinstance( nonexistent, timedelta ): raise ValueError( "The nonexistent argument must be one of 'raise', " "'NaT', 'shift_forward', 'shift_backward' or " "a timedelta object" ) axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, "tz_localize"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- # Numeric Methods @final def describe( self: NDFrameT, percentiles=None, include=None, exclude=None, datetime_is_numeric=False, ) -> NDFrameT: """ Generate descriptive statistics. Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. Here are the options: - 'all' : All columns of the input will be included in the output. - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit ``numpy.number``. To limit it instead to object columns submit the ``numpy.object`` data type. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types from the result. To exclude numeric types submit ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. 
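# ----------------------------------------------------------------------
# Illustrative sketch, not part of the pandas source: the ``nonexistent``
# options documented above, applied to a spring-forward gap.
import pandas as pd

s = pd.Series(range(2),
              index=pd.DatetimeIndex(["2015-03-29 02:30:00",
                                      "2015-03-29 03:30:00"]))
# 02:30 does not exist locally in Europe/Warsaw that night; shift forward
print(s.tz_localize("Europe/Warsaw", nonexistent="shift_forward"))
# ----------------------------------------------------------------------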
Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. datetime_is_numeric : bool, default False Whether to treat datetime dtypes as numeric. This affects statistics calculated for the column. For DataFrame input, this also controls whether datetime columns are included by default. .. versionadded:: 1.1.0 Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. DataFrame.select_dtypes: Subset of a DataFrame including/excluding columns based on their dtype. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and upper percentiles. By default the lower percentile is ``25`` and the upper percentile is ``75``. The ``50`` percentile is the same as the median. For object data (e.g. strings or timestamps), the result's index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value's frequency. Timestamps also include the ``first`` and ``last`` items. If multiple object values have the highest count, then the ``count`` and ``top`` results will be arbitrarily chosen from among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to return only an analysis of numeric columns. If the dataframe consists only of object and categorical data without any numeric columns, the default is to return an analysis of both the object and categorical columns. If ``include='all'`` is provided as an option, the result will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. The parameters are ignored when analyzing a ``Series``. Examples -------- Describing a numeric ``Series``. >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 dtype: float64 Describing a categorical ``Series``. >>> s = pd.Series(['a', 'a', 'b', 'c']) >>> s.describe() count 4 unique 3 top a freq 2 dtype: object Describing a timestamp ``Series``. >>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) >>> s.describe(datetime_is_numeric=True) count 3 mean 2006-09-01 08:00:00 min 2000-01-01 00:00:00 25% 2004-12-31 12:00:00 50% 2010-01-01 00:00:00 75% 2010-01-01 00:00:00 max 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. 
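# ----------------------------------------------------------------------
# Illustrative sketch, not part of the pandas source: the ``percentiles``
# argument described above replaces the default quartiles (the median is
# always included in the output).
import pandas as pd

s = pd.Series([1, 2, 3])
print(s.describe(percentiles=[0.1, 0.9]))
# rows: count, mean, std, min, 10%, 50%, 90%, max
# ----------------------------------------------------------------------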
>>> df.describe(include='all') # doctest: +SKIP categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN a freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[object]) # doctest: +SKIP object count 3 unique 3 top a freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top d freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) # doctest: +SKIP categorical object count 3 3 unique 3 3 top f a freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0 """ return describe_ndframe( obj=self, include=include, exclude=exclude, datetime_is_numeric=datetime_is_numeric, percentiles=percentiles, ) @final def pct_change( self: NDFrameT, periods=1, fill_method="pad", limit=None, freq=None, **kwargs, ) -> NDFrameT: """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by default. This is useful in comparing the percentage of change in a time series of elements. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. fill_method : str, default 'pad' How to handle NAs **before** computing percent changes. limit : int, default None The number of consecutive NAs to fill before stopping. freq : DateOffset, timedelta, or str, optional Increment to use from time series API (e.g. 'M' or BDay()). **kwargs Additional keyword arguments are passed into `DataFrame.shift` or `Series.shift`. Returns ------- chg : Series or DataFrame The same type as the calling object. See Also -------- Series.diff : Compute the difference of two elements in a Series. DataFrame.diff : Compute the difference of two elements in a DataFrame. Series.shift : Shift the index by some number of periods. DataFrame.shift : Shift the index by some number of periods. Examples -------- **Series** >>> s = pd.Series([90, 91, 85]) >>> s 0 90 1 91 2 85 dtype: int64 >>> s.pct_change() 0 NaN 1 0.011111 2 -0.065934 dtype: float64 >>> s.pct_change(periods=2) 0 NaN 1 NaN 2 -0.055556 dtype: float64 See the percentage change in a Series where filling NAs with last valid observation forward to next valid. >>> s = pd.Series([90, 91, None, 85]) >>> s 0 90.0 1 91.0 2 NaN 3 85.0 dtype: float64 >>> s.pct_change(fill_method='ffill') 0 NaN 1 0.011111 2 0.000000 3 -0.065934 dtype: float64 **DataFrame** Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = pd.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... 
index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 Percentage of change in GOOG and APPL stock volume. Shows computing the percentage change between columns. >>> df = pd.DataFrame({ ... '2016': [1769950, 30586265], ... '2015': [1500923, 40912316], ... '2014': [1371819, 41403351]}, ... index=['GOOG', 'APPL']) >>> df 2016 2015 2014 GOOG 1769950 1500923 1371819 APPL 30586265 40912316 41403351 >>> df.pct_change(axis='columns', periods=-1) 2016 2015 2014 GOOG 0.179241 0.094112 NaN APPL -0.252395 -0.011860 NaN """ axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: _data = self.fillna(method=fill_method, axis=axis, limit=limit) assert _data is not None # needed for mypy data = _data shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs) # Unsupported left operand type for / ("NDFrameT") rs = data / shifted - 1 # type: ignore[operator] if freq is not None: # Shift method is implemented differently when freq is not None # We want to restore the original index rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) return rs.__finalize__(self, method="pct_change") @final def _agg_by_level( self, name: str, axis: Axis = 0, level: Level = 0, skipna: bool_t = True, **kwargs, ): if axis is None: raise ValueError("Must specify 'axis' when aggregating by level.") grouped = self.groupby(level=level, axis=axis, sort=False) if hasattr(grouped, name) and skipna: return getattr(grouped, name)(**kwargs) axis = self._get_axis_number(axis) method = getattr(type(self), name) applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs) return grouped.aggregate(applyf) @final def _logical_func( self, name: str, func, axis: Axis = 0, bool_only: bool_t | None = None, skipna: bool_t = True, level: Level | None = None, **kwargs, ) -> Series | bool_t: nv.validate_logical_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if level is not None: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. df.any(level=1) should use df.groupby(level=1).any()", FutureWarning, stacklevel=find_stack_level(), ) if bool_only is not None: raise NotImplementedError( "Option bool_only is not implemented with option level." 
) return self._agg_by_level(name, axis=axis, level=level, skipna=skipna) if self.ndim > 1 and axis is None: # Reduce along one dimension then the other, to simplify DataFrame._reduce res = self._logical_func( name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs ) return res._logical_func(name, func, skipna=skipna, **kwargs) if ( self.ndim > 1 and axis == 1 and len(self._mgr.arrays) > 1 # TODO(EA2D): special-case not needed and all(x.ndim == 2 for x in self._mgr.arrays) and bool_only is not None and not kwargs ): # Fastpath avoiding potentially expensive transpose obj = self if bool_only: obj = self._get_bool_data() return obj._reduce_axis1(name, func, skipna=skipna) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=bool_only, filter_type="bool", ) def any( self, axis: Axis = 0, bool_only: bool_t | None = None, skipna: bool_t = True, level: Level | None = None, **kwargs, ) -> Series | bool_t: return self._logical_func( "any", nanops.nanany, axis, bool_only, skipna, level, **kwargs ) def all( self, axis: Axis = 0, bool_only: bool_t | None = None, skipna: bool_t = True, level: Level | None = None, **kwargs, ) -> Series | bool_t: return self._logical_func( "all", nanops.nanall, axis, bool_only, skipna, level, **kwargs ) @final def _accum_func( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs, ): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = self._stat_axis_number else: axis = self._get_axis_number(axis) if axis == 1: return self.T._accum_func( name, func, axis=0, skipna=skipna, *args, **kwargs ).T def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, "T") else blk_values result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, "T") else result return result result = self._mgr.apply(block_accum_func) return self._constructor(result).__finalize__(self, method=name) def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs ) def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs ) def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs) def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs) @final def _stat_function_ddof( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, ddof: int = 1, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: nv.validate_stat_ddof_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number if level is not None: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. 
df.var(level=1) should use df.groupby(level=1).var().", FutureWarning, stacklevel=find_stack_level(), ) return self._agg_by_level( name, axis=axis, level=level, skipna=skipna, ddof=ddof ) return self._reduce( func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, ddof: int = 1, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function_ddof( "sem", nanops.nansem, axis, skipna, level, ddof, numeric_only, **kwargs ) def var( self, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, ddof: int = 1, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function_ddof( "var", nanops.nanvar, axis, skipna, level, ddof, numeric_only, **kwargs ) def std( self, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, ddof: int = 1, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function_ddof( "std", nanops.nanstd, axis, skipna, level, ddof, numeric_only, **kwargs ) @final def _stat_function( self, name: str, func, axis: Axis | None | lib.NoDefault = None, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ): if name == "median": nv.validate_median((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None and level is None and self.ndim > 1: # user must have explicitly passed axis=None # GH#21597 warnings.warn( f"In a future version, DataFrame.{name}(axis=None) will return a " f"scalar {name} over the entire DataFrame. To retain the old " f"behavior, use 'frame.{name}(axis=0)' or just 'frame.{name}()'", FutureWarning, stacklevel=find_stack_level(), ) if axis is lib.no_default: axis = None if axis is None: axis = self._stat_axis_number axis = cast(Axis, axis) if level is not None: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. 
df.median(level=1) should use df.groupby(level=1).median().", FutureWarning, stacklevel=find_stack_level(), ) return self._agg_by_level( name, axis=axis, level=level, skipna=skipna, numeric_only=numeric_only ) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) def min( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ): return self._stat_function( "min", nanops.nanmin, axis, skipna, level, numeric_only, **kwargs, ) def max( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ): return self._stat_function( "max", nanops.nanmax, axis, skipna, level, numeric_only, **kwargs, ) def mean( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function( "mean", nanops.nanmean, axis, skipna, level, numeric_only, **kwargs ) def median( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function( "median", nanops.nanmedian, axis, skipna, level, numeric_only, **kwargs ) def skew( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function( "skew", nanops.nanskew, axis, skipna, level, numeric_only, **kwargs ) def kurt( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function( "kurt", nanops.nankurt, axis, skipna, level, numeric_only, **kwargs ) kurtosis = kurt @final def _min_count_stat_function( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, min_count: int = 0, **kwargs, ): if name == "sum": nv.validate_sum((), kwargs) elif name == "prod": nv.validate_prod((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number if level is not None: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. 
df.sum(level=1) should use df.groupby(level=1).sum().",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
            return self._agg_by_level(
                name,
                axis=axis,
                level=level,
                skipna=skipna,
                min_count=min_count,
                numeric_only=numeric_only,
            )
        return self._reduce(
            func,
            name=name,
            axis=axis,
            skipna=skipna,
            numeric_only=numeric_only,
            min_count=min_count,
        )

    def sum(
        self,
        axis: Axis | None = None,
        skipna: bool_t = True,
        level: Level | None = None,
        numeric_only: bool_t | None = None,
        min_count=0,
        **kwargs,
    ):
        return self._min_count_stat_function(
            "sum", nanops.nansum, axis, skipna, level, numeric_only, min_count, **kwargs
        )

    def prod(
        self,
        axis: Axis | None = None,
        skipna: bool_t = True,
        level: Level | None = None,
        numeric_only: bool_t | None = None,
        min_count: int = 0,
        **kwargs,
    ):
        return self._min_count_stat_function(
            "prod",
            nanops.nanprod,
            axis,
            skipna,
            level,
            numeric_only,
            min_count,
            **kwargs,
        )

    product = prod

    def mad(
        self,
        axis: Axis | None = None,
        skipna: bool_t = True,
        level: Level | None = None,
    ) -> Series | float:
        """
        {desc}

        Parameters
        ----------
        axis : {axis_descr}
            Axis for the function to be applied on.
        skipna : bool, default True
            Exclude NA/null values when computing the result.
        level : int or level name, default None
            If the axis is a MultiIndex (hierarchical), count along a
            particular level, collapsing into a {name1}.

        Returns
        -------
        {name1} or {name2} (if level specified)\
        {see_also}\
        {examples}
        """
        if not is_bool(skipna):
            warnings.warn(
                "Passing None for skipna is deprecated and will raise in a future "
                "version. Pass True instead. Only boolean values will be allowed "
                "in the future.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. 
df.mad(level=1) should use df.groupby(level=1).mad()", FutureWarning, stacklevel=find_stack_level(), ) return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna) data = self._get_numeric_data() if axis == 0: demeaned = data - data.mean(axis=0) else: demeaned = data.sub(data.mean(axis=1), axis=0) return np.abs(demeaned).mean(axis=axis, skipna=skipna) @classmethod def _add_numeric_operations(cls): """ Add the operations to the cls; evaluate the doc strings again """ axis_descr, name1, name2 = _doc_params(cls) @doc( _bool_doc, desc=_any_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_any_see_also, examples=_any_examples, empty_value=False, ) def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs): return NDFrame.any(self, axis, bool_only, skipna, level, **kwargs) setattr(cls, "any", any) @doc( _bool_doc, desc=_all_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_all_see_also, examples=_all_examples, empty_value=True, ) def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs): return NDFrame.all(self, axis, bool_only, skipna, level, **kwargs) setattr(cls, "all", all) # error: Argument 1 to "doc" has incompatible type "Optional[str]"; expected # "Union[str, Callable[..., Any]]" @doc( NDFrame.mad.__doc__, # type: ignore[arg-type] desc="Return the mean absolute deviation of the values " "over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, see_also="", examples="", ) def mad(self, axis=None, skipna=True, level=None): return NDFrame.mad(self, axis, skipna, level) setattr(cls, "mad", mad) @doc( _num_ddof_doc, desc="Return unbiased standard error of the mean over requested " "axis.\n\nNormalized by N-1 by default. This can be changed " "using the ddof argument", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples="", ) def sem( self, axis=None, skipna=True, level=None, ddof=1, numeric_only=None, **kwargs, ): return NDFrame.sem(self, axis, skipna, level, ddof, numeric_only, **kwargs) setattr(cls, "sem", sem) @doc( _num_ddof_doc, desc="Return unbiased variance over requested axis.\n\nNormalized by " "N-1 by default. This can be changed using the ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples=_var_examples, ) def var( self, axis=None, skipna=True, level=None, ddof=1, numeric_only=None, **kwargs, ): return NDFrame.var(self, axis, skipna, level, ddof, numeric_only, **kwargs) setattr(cls, "var", var) @doc( _num_ddof_doc, desc="Return sample standard deviation over requested axis." "\n\nNormalized by N-1 by default. 
This can be changed using the " "ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes=_std_notes, examples=_std_examples, ) def std( self, axis=None, skipna=True, level=None, ddof=1, numeric_only=None, **kwargs, ): return NDFrame.std(self, axis, skipna, level, ddof, numeric_only, **kwargs) setattr(cls, "std", std) @doc( _cnum_doc, desc="minimum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="min", examples=_cummin_examples, ) def cummin(self, axis=None, skipna=True, *args, **kwargs): return NDFrame.cummin(self, axis, skipna, *args, **kwargs) setattr(cls, "cummin", cummin) @doc( _cnum_doc, desc="maximum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="max", examples=_cummax_examples, ) def cummax(self, axis=None, skipna=True, *args, **kwargs): return NDFrame.cummax(self, axis, skipna, *args, **kwargs) setattr(cls, "cummax", cummax) @doc( _cnum_doc, desc="sum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="sum", examples=_cumsum_examples, ) def cumsum(self, axis=None, skipna=True, *args, **kwargs): return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) setattr(cls, "cumsum", cumsum) @doc( _cnum_doc, desc="product", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="prod", examples=_cumprod_examples, ) def cumprod(self, axis=None, skipna=True, *args, **kwargs): return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) setattr(cls, "cumprod", cumprod) @doc( _num_doc, desc="Return the sum of the values over the requested axis.\n\n" "This is equivalent to the method ``numpy.sum``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_sum_examples, ) def sum( self, axis=None, skipna=True, level=None, numeric_only=None, min_count=0, **kwargs, ): return NDFrame.sum( self, axis, skipna, level, numeric_only, min_count, **kwargs ) setattr(cls, "sum", sum) @doc( _num_doc, desc="Return the product of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_prod_examples, ) def prod( self, axis=None, skipna=True, level=None, numeric_only=None, min_count=0, **kwargs, ): return NDFrame.prod( self, axis, skipna, level, numeric_only, min_count, **kwargs ) setattr(cls, "prod", prod) cls.product = prod @doc( _num_doc, desc="Return the mean of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def mean( self, axis: int | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.mean(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "mean", mean) @doc( _num_doc, desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def skew( self, axis: int | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.skew(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "skew", skew) @doc( _num_doc, desc="Return unbiased kurtosis over requested axis.\n\n" "Kurtosis obtained using Fisher's definition of\n" "kurtosis (kurtosis of normal == 0.0). 
Normalized " "by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def kurt( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.kurt(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "kurt", kurt) cls.kurtosis = kurt @doc( _num_doc, desc="Return the median of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def median( self, axis: int | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.median(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "median", median) # error: Untyped decorator makes function "max" untyped @doc( # type: ignore[misc] _num_doc, desc="Return the maximum of the values over the requested axis.\n\n" "If you want the *index* of the maximum, use ``idxmax``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmax``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_max_examples, ) def max( self, axis: int | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.max(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "max", max) # error: Untyped decorator makes function "max" untyped @doc( # type: ignore[misc] _num_doc, desc="Return the minimum of the values over the requested axis.\n\n" "If you want the *index* of the minimum, use ``idxmin``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmin``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_min_examples, ) def min( self, axis: int | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.min(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "min", min) @final @doc(Rolling) def rolling( self, window: int | timedelta | BaseOffset | BaseIndexer, min_periods: int | None = None, center: bool_t = False, win_type: str | None = None, on: str | None = None, axis: Axis = 0, closed: str | None = None, step: int | None = None, method: str = "single", ): axis = self._get_axis_number(axis) if win_type is not None: return Window( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) return Rolling( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) @final @doc(Expanding) def expanding( self, min_periods: int = 1, center: bool_t | None = None, axis: Axis = 0, method: str = "single", ) -> Expanding: axis = self._get_axis_number(axis) if center is not None: warnings.warn( "The `center` argument on `expanding` will be removed in the future.", FutureWarning, stacklevel=find_stack_level(), ) else: center = False return Expanding( self, min_periods=min_periods, center=center, axis=axis, method=method ) @final @doc(ExponentialMovingWindow) def ewm( self, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool_t = True, ignore_na: bool_t = False, axis: Axis = 0, times: str | np.ndarray | DataFrame | Series | None = None, method: str = "single", ) -> ExponentialMovingWindow: axis = 
self._get_axis_number(axis) return ExponentialMovingWindow( self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis, times=times, method=method, ) # ---------------------------------------------------------------------- # Arithmetic Methods @final def _inplace_method(self, other, op): """ Wrap arithmetic method to operate inplace. """ result = op(self, other) if ( self.ndim == 1 and result._indexed_same(self) and is_dtype_equal(result.dtype, self.dtype) ): # GH#36498 this inplace op can _actually_ be inplace. self._values[:] = result._values return self # Delete cacher self._reset_cacher() # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace( result.reindex_like(self, copy=False), verify_is_copy=False ) return self def __iadd__(self, other): # error: Unsupported left operand type for + ("Type[NDFrame]") return self._inplace_method(other, type(self).__add__) # type: ignore[operator] def __isub__(self, other): # error: Unsupported left operand type for - ("Type[NDFrame]") return self._inplace_method(other, type(self).__sub__) # type: ignore[operator] def __imul__(self, other): # error: Unsupported left operand type for * ("Type[NDFrame]") return self._inplace_method(other, type(self).__mul__) # type: ignore[operator] def __itruediv__(self, other): # error: Unsupported left operand type for / ("Type[NDFrame]") return self._inplace_method( other, type(self).__truediv__ # type: ignore[operator] ) def __ifloordiv__(self, other): # error: Unsupported left operand type for // ("Type[NDFrame]") return self._inplace_method( other, type(self).__floordiv__ # type: ignore[operator] ) def __imod__(self, other): # error: Unsupported left operand type for % ("Type[NDFrame]") return self._inplace_method(other, type(self).__mod__) # type: ignore[operator] def __ipow__(self, other): # error: Unsupported left operand type for ** ("Type[NDFrame]") return self._inplace_method(other, type(self).__pow__) # type: ignore[operator] def __iand__(self, other): # error: Unsupported left operand type for & ("Type[NDFrame]") return self._inplace_method(other, type(self).__and__) # type: ignore[operator] def __ior__(self, other): # error: Unsupported left operand type for | ("Type[NDFrame]") return self._inplace_method(other, type(self).__or__) # type: ignore[operator] def __ixor__(self, other): # error: Unsupported left operand type for ^ ("Type[NDFrame]") return self._inplace_method(other, type(self).__xor__) # type: ignore[operator] # ---------------------------------------------------------------------- # Misc methods @final def _find_valid_index(self, *, how: str) -> Hashable | None: """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ idxpos = find_valid_index(self._values, how=how) if idxpos is None: return None return self.index[idxpos] @final @doc(position="first", klass=_shared_doc_kwargs["klass"]) def first_valid_index(self) -> Hashable | None: """ Return index for {position} non-NA value or None, if no non-NA value is found. Returns ------- scalar : type of index Notes ----- If all elements are non-NA/null, returns None. Also returns None for empty {klass}. 
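
        Examples
        --------
        For a Series with leading and trailing NAs (the values below are
        illustrative):

        >>> s = pd.Series([None, 3, 4, None])
        >>> s.first_valid_index()
        1
        >>> s.last_valid_index()
        2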
""" return self._find_valid_index(how="first") @final @doc(first_valid_index, position="last", klass=_shared_doc_kwargs["klass"]) def last_valid_index(self) -> Hashable | None: return self._find_valid_index(how="last") def _doc_params(cls): """Return a tuple of the doc params.""" axis_descr = ( f"{{{", ".join([f"{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS)])}}}" ) name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar" name2 = cls.__name__ return axis_descr, name, name2 _num_doc = """ {desc} Parameters ---------- axis : {axis_descr} Axis for the function to be applied on. skipna : bool, default True Exclude NA/null values when computing the result. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a {name1}. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. {min_count}\ **kwargs Additional keyword arguments to be passed to the function. Returns ------- {name1} or {name2} (if level specified)\ {see_also}\ {examples} """ _num_ddof_doc = """ {desc} Parameters ---------- axis : {axis_descr} skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a {name1}. ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- {name1} or {name2} (if level specified) \ {notes}\ {examples} """ _std_notes = """ Notes ----- To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the default `ddof=1`)""" _std_examples = """ Examples -------- >>> df = pd.DataFrame({'person_id': [0, 1, 2, 3], ... 'age': [21, 25, 62, 43], ... 'height': [1.61, 1.87, 1.49, 2.01]} ... ).set_index('person_id') >>> df age height person_id 0 21 1.61 1 25 1.87 2 62 1.49 3 43 2.01 The standard deviation of the columns can be found as follows: >>> df.std() age 18.786076 height 0.237417 Alternatively, `ddof=0` can be set to normalize by N instead of N-1: >>> df.std(ddof=0) age 16.269219 height 0.205609""" _var_examples = """ Examples -------- >>> df = pd.DataFrame({'person_id': [0, 1, 2, 3], ... 'age': [21, 25, 62, 43], ... 'height': [1.61, 1.87, 1.49, 2.01]} ... ).set_index('person_id') >>> df age height person_id 0 21 1.61 1 25 1.87 2 62 1.49 3 43 2.01 >>> df.var() age 352.916667 height 0.056367 Alternatively, ``ddof=0`` can be set to normalize by N instead of N-1: >>> df.var(ddof=0) age 264.687500 height 0.042275""" _bool_doc = """ {desc} Parameters ---------- axis : {{0 or 'index', 1 or 'columns', None}}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. * 1 / 'columns' : reduce the columns, return a Series whose index is the original index. * None : reduce all axes, return a scalar. bool_only : bool, default None Include only boolean columns. If None, will attempt to use everything, then use only boolean data. Not implemented for Series. skipna : bool, default True Exclude NA/null values. 
If the entire row/column is NA and skipna is
    True, then the result will be {empty_value}, as for an empty row/column.
    If skipna is False, then NA are treated as True, because these are not
    equal to zero.
level : int or level name, default None
    If the axis is a MultiIndex (hierarchical), count along a
    particular level, collapsing into a {name1}.
**kwargs : any, default None
    Additional keywords have no effect but might be accepted for
    compatibility with NumPy.

Returns
-------
{name1} or {name2}
    If level is specified, then {name2} is returned; otherwise, {name1}
    is returned.

{see_also}
{examples}"""

_all_desc = """\
Return whether all elements are True, potentially over an axis.

Returns True unless there is at least one element within a series or
along a DataFrame axis that is False or equivalent (e.g. zero or
empty)."""

_all_examples = """\
Examples
--------
**Series**

>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([], dtype="float64").all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True

**DataFrames**

Create a dataframe from a dictionary.

>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
   col1   col2
0  True   True
1  True  False

Default behaviour checks if column-wise values all return True.

>>> df.all()
col1     True
col2    False
dtype: bool

Specify ``axis='columns'`` to check if row-wise values all return True.

>>> df.all(axis='columns')
0     True
1    False
dtype: bool

Or ``axis=None`` for whether every value is True.

>>> df.all(axis=None)
False
"""

_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""

_cnum_doc = """
Return cumulative {desc} over a DataFrame or Series axis.

Returns a DataFrame or Series of the same size containing the cumulative
{desc}.

Parameters
----------
axis : {{0 or 'index', 1 or 'columns'}}, default 0
    The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
    Exclude NA/null values. If an entire row/column is NA, the result
    will be NA.
*args, **kwargs
    Additional keywords have no effect but might be accepted for
    compatibility with NumPy.

Returns
-------
{name1} or {name2}
    Return cumulative {desc} of {name1} or {name2}.

See Also
--------
core.window.Expanding.{accum_func_name} : Similar functionality
    but ignores ``NaN`` values.
{name2}.{accum_func_name} : Return the {desc} over
    {name2} axis.
{name2}.cummax : Return cumulative maximum over {name2} axis.
{name2}.cummin : Return cumulative minimum over {name2} axis.
{name2}.cumsum : Return cumulative sum over {name2} axis.
{name2}.cumprod : Return cumulative product over {name2} axis.

{examples}"""

_cummin_examples = """\
Examples
--------
**Series**

>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0    2.0
1    NaN
2    5.0
3   -1.0
4    0.0
dtype: float64

By default, NA values are ignored.

>>> s.cummin()
0    2.0
1    NaN
2    2.0
3   -1.0
4   -1.0
dtype: float64

To include NA values in the operation, use ``skipna=False``

>>> s.cummin(skipna=False)
0    2.0
1    NaN
2    NaN
3    NaN
4    NaN
dtype: float64

**DataFrame**

>>> df = pd.DataFrame([[2.0, 1.0],
...                    [3.0, np.nan],
...                    [1.0, 0.0]],
...                   columns=list('AB'))
>>> df
     A    B
0  2.0  1.0
1  3.0  NaN
2  1.0  0.0

By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``. 
>>> df.cummin() A B 0 2.0 1.0 1 2.0 NaN 2 1.0 0.0 To iterate over columns and find the minimum in each row, use ``axis=1`` >>> df.cummin(axis=1) A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 """ _cumsum_examples = """\ Examples -------- **Series** >>> s = pd.Series([2, np.nan, 5, -1, 0]) >>> s 0 2.0 1 NaN 2 5.0 3 -1.0 4 0.0 dtype: float64 By default, NA values are ignored. >>> s.cumsum() 0 2.0 1 NaN 2 7.0 3 6.0 4 6.0 dtype: float64 To include NA values in the operation, use ``skipna=False`` >>> s.cumsum(skipna=False) 0 2.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 **DataFrame** >>> df = pd.DataFrame([[2.0, 1.0], ... [3.0, np.nan], ... [1.0, 0.0]], ... columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the sum in each column. This is equivalent to ``axis=None`` or ``axis='index'``. >>> df.cumsum() A B 0 2.0 1.0 1 5.0 NaN 2 6.0 1.0 To iterate over columns and find the sum in each row, use ``axis=1`` >>> df.cumsum(axis=1) A B 0 2.0 3.0 1 3.0 NaN 2 1.0 1.0 """ _cumprod_examples = """\ Examples -------- **Series** >>> s = pd.Series([2, np.nan, 5, -1, 0]) >>> s 0 2.0 1 NaN 2 5.0 3 -1.0 4 0.0 dtype: float64 By default, NA values are ignored. >>> s.cumprod() 0 2.0 1 NaN 2 10.0 3 -10.0 4 -0.0 dtype: float64 To include NA values in the operation, use ``skipna=False`` >>> s.cumprod(skipna=False) 0 2.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 **DataFrame** >>> df = pd.DataFrame([[2.0, 1.0], ... [3.0, np.nan], ... [1.0, 0.0]], ... columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the product in each column. This is equivalent to ``axis=None`` or ``axis='index'``. >>> df.cumprod() A B 0 2.0 1.0 1 6.0 NaN 2 6.0 0.0 To iterate over columns and find the product in each row, use ``axis=1`` >>> df.cumprod(axis=1) A B 0 2.0 2.0 1 3.0 NaN 2 1.0 0.0 """ _cummax_examples = """\ Examples -------- **Series** >>> s = pd.Series([2, np.nan, 5, -1, 0]) >>> s 0 2.0 1 NaN 2 5.0 3 -1.0 4 0.0 dtype: float64 By default, NA values are ignored. >>> s.cummax() 0 2.0 1 NaN 2 5.0 3 5.0 4 5.0 dtype: float64 To include NA values in the operation, use ``skipna=False`` >>> s.cummax(skipna=False) 0 2.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 **DataFrame** >>> df = pd.DataFrame([[2.0, 1.0], ... [3.0, np.nan], ... [1.0, 0.0]], ... columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the maximum in each column. This is equivalent to ``axis=None`` or ``axis='index'``. >>> df.cummax() A B 0 2.0 1.0 1 3.0 NaN 2 3.0 1.0 To iterate over columns and find the maximum in each row, use ``axis=1`` >>> df.cummax(axis=1) A B 0 2.0 2.0 1 3.0 NaN 2 1.0 1.0 """ _any_see_also = """\ See Also -------- numpy.any : Numpy version of this method. Series.any : Return whether any element is True. Series.all : Return whether all elements are True. DataFrame.any : Return whether any element is True over requested axis. DataFrame.all : Return whether all elements are True over requested axis. """ _any_desc = """\ Return whether any element is True, potentially over an axis. Returns False unless there is at least one element within a series or along a Dataframe axis that is True or equivalent (e.g. non-zero or non-empty).""" _any_examples = """\ Examples -------- **Series** For Series input, the output is a scalar indicating whether any element is True. 
>>> pd.Series([False, False]).any() False >>> pd.Series([True, False]).any() True >>> pd.Series([], dtype="float64").any() False >>> pd.Series([np.nan]).any() False >>> pd.Series([np.nan]).any(skipna=False) True **DataFrame** Whether each column contains at least one True element (the default). >>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]}) >>> df A B C 0 1 0 0 1 2 2 0 >>> df.any() A True B True C False dtype: bool Aggregating over the columns. >>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]}) >>> df A B 0 True 1 1 False 2 >>> df.any(axis='columns') 0 True 1 True dtype: bool >>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]}) >>> df A B 0 True 1 1 False 0 >>> df.any(axis='columns') 0 True 1 False dtype: bool Aggregating over the entire DataFrame with ``axis=None``. >>> df.any(axis=None) True `any` for an empty DataFrame is an empty Series. >>> pd.DataFrame([]).any() Series([], dtype: bool) """ _shared_docs[ "stat_func_example" ] = """ Examples -------- >>> idx = pd.MultiIndex.from_arrays([ ... ['warm', 'warm', 'cold', 'cold'], ... ['dog', 'falcon', 'fish', 'spider']], ... names=['blooded', 'animal']) >>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx) >>> s blooded animal warm dog 4 falcon 2 cold fish 0 spider 8 Name: legs, dtype: int64 >>> s.{stat_func}() {default_output}""" _sum_examples = _shared_docs["stat_func_example"].format( stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8 ) _sum_examples += """ By default, the sum of an empty or all-NA Series is ``0``. >>> pd.Series([], dtype="float64").sum() # min_count=0 is the default 0.0 This can be controlled with the ``min_count`` parameter. For example, if you'd like the sum of an empty series to be NaN, pass ``min_count=1``. >>> pd.Series([], dtype="float64").sum(min_count=1) nan Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and empty series identically. >>> pd.Series([np.nan]).sum() 0.0 >>> pd.Series([np.nan]).sum(min_count=1) nan""" _max_examples = _shared_docs["stat_func_example"].format( stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8 ) _min_examples = _shared_docs["stat_func_example"].format( stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0 ) _stat_func_see_also = """ See Also -------- Series.sum : Return the sum. Series.min : Return the minimum. Series.max : Return the maximum. Series.idxmin : Return the index of the minimum. Series.idxmax : Return the index of the maximum. DataFrame.sum : Return the sum over the requested axis. DataFrame.min : Return the minimum over the requested axis. DataFrame.max : Return the maximum over the requested axis. DataFrame.idxmin : Return the index of the minimum over the requested axis. DataFrame.idxmax : Return the index of the maximum over the requested axis.""" _prod_examples = """ Examples -------- By default, the product of an empty or all-NA Series is ``1`` >>> pd.Series([], dtype="float64").prod() 1.0 This can be controlled with the ``min_count`` parameter >>> pd.Series([], dtype="float64").prod(min_count=1) nan Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and empty series identically. >>> pd.Series([np.nan]).prod() 1.0 >>> pd.Series([np.nan]).prod(min_count=1) nan""" _min_count_stub = """\ min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. 
""" def _align_as_utc( left: NDFrameT, right: NDFrameT, join_index: Index | None ) -> tuple[NDFrameT, NDFrameT]: """ If we are aligning timezone-aware DatetimeIndexes and the timezones do not match, convert both to UTC. """ if is_datetime64tz_dtype(left.index.dtype): if left.index.tz != right.index.tz: if join_index is not None: # GH#33671 ensure we don't change the index on # our original Series (NB: by default deep=False) left = left.copy() right = right.copy() left.index = join_index right.index = join_index return left, right
# pyright: reportPropertyTypeMismatch=false from __future__ import annotations import collections from datetime import timedelta import functools import gc import json import operator import pickle import re from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Literal, Mapping, Sequence, Type, cast, final, overload, ) import warnings import weakref import numpy as np from pandas._config import config from pandas._libs import lib from pandas._libs.tslibs import ( Period, Tick, Timestamp, to_offset, ) from pandas._typing import ( ArrayLike, Axis, CompressionOptions, Dtype, DtypeArg, DtypeObj, FilePath, IndexKeyFunc, IndexLabel, IntervalClosedType, JSONSerializable, Level, Manager, NDFrameT, RandomState, Renamer, StorageOptions, T, TimedeltaConvertibleTypes, TimestampConvertibleTypes, ValueKeyFunc, WriteBuffer, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, InvalidIndexError, ) from pandas.util._decorators import ( deprecate_kwarg, doc, rewrite_axis_style_signature, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_ascending, validate_bool_kwarg, validate_fillna_kwargs, validate_inclusive, ) from pandas.core.dtypes.common import ( ensure_object, ensure_platform_int, ensure_str, is_bool, is_bool_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype, is_dict_like, is_dtype_equal, is_extension_array_dtype, is_float, is_list_like, is_number, is_numeric_dtype, is_re_compilable, is_scalar, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.inference import ( is_hashable, is_nested_list_like, ) from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core import ( algorithms as algos, arraylike, common as com, indexing, missing, nanops, sample, ) from pandas.core.array_algos.replace import should_use_regex from pandas.core.arrays import ExtensionArray from pandas.core.base import PandasObject from pandas.core.construction import ( create_series_with_explicit_dtype, extract_array, ) from pandas.core.describe import describe_ndframe from pandas.core.flags import Flags from pandas.core.indexes.api import ( DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, default_index, ensure_index, ) from pandas.core.internals import ( ArrayManager, BlockManager, SingleArrayManager, ) from pandas.core.internals.construction import mgr_to_mgr from pandas.core.missing import find_valid_index from pandas.core.ops import align_method_FRAME from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.core.sorting import get_indexer_indexer from pandas.core.window import ( Expanding, ExponentialMovingWindow, Rolling, Window, ) from pandas.io.formats import format as fmt from pandas.io.formats.format import ( DataFrameFormatter, DataFrameRenderer, ) from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from pandas._libs.tslibs import BaseOffset from pandas.core.frame import DataFrame from pandas.core.indexers.objects import BaseIndexer from pandas.core.resample import Resampler from pandas.core.series import Series # goal is to be able to define the docs close to function, while still being # able to share _shared_docs = {**_shared_docs} _shared_doc_kwargs = { "axes": "keywords for axes", "klass": "Series/DataFrame", "axes_single_arg": "int or labels for object", "args_transpose": "axes to 
permute (int or label for object)", "inplace": """ inplace : bool, default False If True, performs operation inplace and returns None.""", "optional_by": """ by : str or list of str Name or list of names to sort by""", "replace_iloc": """ This differs from updating with ``.loc`` or ``.iloc``, which require you to specify a location to update with some value.""", } bool_t = bool # Need alias because NDFrame has def bool: class NDFrame(PandasObject, indexing.IndexingMixin): """ N-dimensional analogue of DataFrame. Store multi-dimensional in a size-mutable, labeled data structure Parameters ---------- data : BlockManager axes : list copy : bool, default False """ _internal_names: list[str] = [ "_mgr", "_cacher", "_item_cache", "_cache", "_is_copy", "_subtyp", "_name", "_default_kind", "_default_fill_value", "_metadata", "__array_struct__", "__array_interface__", "_flags", ] _internal_names_set: set[str] = set(_internal_names) _accessors: set[str] = set() _hidden_attrs: frozenset[str] = frozenset( ["_AXIS_NAMES", "_AXIS_NUMBERS", "get_values", "tshift"] ) _metadata: list[str] = [] _is_copy: weakref.ReferenceType[NDFrame] | None = None _mgr: Manager _attrs: dict[Hashable, Any] _typ: str # ---------------------------------------------------------------------- # Constructors def __init__( self, data: Manager, copy: bool_t = False, attrs: Mapping[Hashable, Any] | None = None, ): # copy kwarg is retained for mypy compat, is not used object.__setattr__(self, "_is_copy", None) object.__setattr__(self, "_mgr", data) object.__setattr__(self, "_item_cache", {}) if attrs is None: attrs = {} else: attrs = dict(attrs) object.__setattr__(self, "_attrs", attrs) object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) @classmethod def _init_mgr( cls, mgr: Manager, axes, dtype: Dtype | None = None, copy: bool_t = False, ) -> Manager: """passed a manager and a axes dict""" for a, axe in axes.items(): if axe is not None: axe = ensure_index(axe) bm_axis = cls._get_block_manager_axis(a) mgr = mgr.reindex_axis(axe, axis=bm_axis) # make a copy if explicitly requested if copy: mgr = mgr.copy() if dtype is not None: # avoid further copies if we can if ( isinstance(mgr, BlockManager) and len(mgr.blocks) == 1 and is_dtype_equal(mgr.blocks[0].values.dtype, dtype) ): pass else: mgr = mgr.astype(dtype=dtype) return mgr def _as_manager(self: NDFrameT, typ: str, copy: bool_t = True) -> NDFrameT: """ Private helper function to create a DataFrame with specific manager. Parameters ---------- typ : {"block", "array"} copy : bool, default True Only controls whether the conversion from Block->ArrayManager copies the 1D arrays (to ensure proper/contiguous memory layout). Returns ------- DataFrame New DataFrame using specified manager type. Is not guaranteed to be a copy or not. """ new_mgr: Manager new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy) # fastpath of passing a manager doesn't check the option/manager class return self._constructor(new_mgr).__finalize__(self) # ---------------------------------------------------------------------- # attrs and flags @property def attrs(self) -> dict[Hashable, Any]: """ Dictionary of global attributes of this dataset. .. warning:: attrs is experimental and may change without warning. See Also -------- DataFrame.flags : Global flags applying to this object. 
""" if self._attrs is None: self._attrs = {} return self._attrs @attrs.setter def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) @final @property def flags(self) -> Flags: """ Get the properties associated with this pandas object. The available flags are * :attr:`Flags.allows_duplicate_labels` See Also -------- Flags : Flags that apply to pandas objects. DataFrame.attrs : Global metadata applying to this dataset. Notes ----- "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags <Flags(allows_duplicate_labels=True)> Flags can be get or set using ``.`` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags["allows_duplicate_labels"] False >>> df.flags["allows_duplicate_labels"] = True """ return self._flags @final def set_flags( self: NDFrameT, *, copy: bool_t = False, allows_duplicate_labels: bool_t | None = None, ) -> NDFrameT: """ Return a new object with updated flags. Parameters ---------- allows_duplicate_labels : bool, optional Whether the returned object allows duplicate labels. Returns ------- Series or DataFrame The same type as the caller. See Also -------- DataFrame.attrs : Global metadata applying to this dataset. DataFrame.flags : Global flags applying to this object. Notes ----- This method returns a new object that's a view on the same data as the input. Mutating the input or the output values will be reflected in the other. This method is intended to be used in method chains. "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags.allows_duplicate_labels True >>> df2 = df.set_flags(allows_duplicate_labels=False) >>> df2.flags.allows_duplicate_labels False """ df = self.copy(deep=copy) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels return df @final @classmethod def _validate_dtype(cls, dtype) -> DtypeObj | None: """validate the passed dtype""" if dtype is not None: dtype = pandas_dtype(dtype) # a compound dtype if dtype.kind == "V": raise NotImplementedError( "compound dtypes are not implemented " f"in the {cls.__name__} constructor" ) return dtype # ---------------------------------------------------------------------- # Construction @property def _constructor(self: NDFrameT) -> Callable[..., NDFrameT]: """ Used when a manipulation result has the same dimensions as the original. """ raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Internals @final @property def _data(self): # GH#33054 retained because some downstream packages uses this, # e.g. fastparquet return self._mgr # ---------------------------------------------------------------------- # Axis _stat_axis_number = 0 _stat_axis_name = "index" _AXIS_ORDERS: list[str] _AXIS_TO_AXIS_NUMBER: dict[Axis, int] = {0: 0, "index": 0, "rows": 0} _info_axis_number: int _info_axis_name: str _AXIS_LEN: int @property def _AXIS_NUMBERS(self) -> dict[str, int]: """.. 
deprecated:: 1.1.0""" warnings.warn( "_AXIS_NUMBERS has been deprecated.", FutureWarning, stacklevel=find_stack_level(), ) return {"index": 0} @property def _AXIS_NAMES(self) -> dict[int, str]: """.. deprecated:: 1.1.0""" level = self.ndim + 1 warnings.warn( "_AXIS_NAMES has been deprecated.", FutureWarning, stacklevel=level ) return {0: "index"} @final def _construct_axes_dict(self, axes=None, **kwargs): """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} d.update(kwargs) return d @final @classmethod def _construct_axes_from_arguments( cls, args, kwargs, require_all: bool_t = False, sentinel=None ): """ Construct and returns axes if supplied in args/kwargs. If require_all, raise if all axis arguments are not supplied return a tuple of (axes, kwargs). sentinel specifies the default parameter when an axis is not supplied; useful to distinguish when a user explicitly passes None in scenarios where None has special meaning. """ # construct the args args = list(args) for a in cls._AXIS_ORDERS: # look for a argument by position if a not in kwargs: try: kwargs[a] = args.pop(0) except IndexError as err: if require_all: raise TypeError( "not enough/duplicate arguments specified!" ) from err axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS} return axes, kwargs @final @classmethod def _get_axis_number(cls, axis: Axis) -> int: try: return cls._AXIS_TO_AXIS_NUMBER[axis] except KeyError: raise ValueError(f"No axis named {axis} for object type {cls.__name__}") @final @classmethod def _get_axis_name(cls, axis: Axis) -> str: axis_number = cls._get_axis_number(axis) return cls._AXIS_ORDERS[axis_number] @final def _get_axis(self, axis: Axis) -> Index: axis_number = self._get_axis_number(axis) assert axis_number in {0, 1} return self.index if axis_number == 0 else self.columns @final @classmethod def _get_block_manager_axis(cls, axis: Axis) -> int: """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) ndim = cls._AXIS_LEN if ndim == 2: # i.e. DataFrame return 1 - axis return axis @final def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]: # index or columns axis_index = getattr(self, axis) d = {} prefix = axis[0] for i, name in enumerate(axis_index.names): if name is not None: key = level = name else: # prefix with 'i' or 'c' depending on the input axis # e.g., you must do ilevel_0 for the 0th level of an unnamed # multiiindex key = f"{prefix}level_{i}" level = i level_values = axis_index.get_level_values(level) s = level_values.to_series() s.index = axis_index d[key] = s # put the index/columns itself in the dict if isinstance(axis_index, MultiIndex): dindex = axis_index else: dindex = axis_index.to_series() d[axis] = dindex return d @final def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]: from pandas.core.computation.parsing import clean_column_name d: dict[str, Series | MultiIndex] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)} @final def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]: """ Return the special character free column resolvers of a dataframe. Column names with special characters are 'cleaned up' so that they can be referred to by backtick quoting. Used in :meth:`DataFrame.eval`. 
""" from pandas.core.computation.parsing import clean_column_name if isinstance(self, ABCSeries): return {clean_column_name(self.name): self} return { clean_column_name(k): v for k, v in self.items() if not isinstance(k, int) } @property def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) @property def _stat_axis(self) -> Index: return getattr(self, self._stat_axis_name) @property def shape(self) -> tuple[int, ...]: """ Return a tuple of axis dimensions """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) @property def axes(self) -> list[Index]: """ Return index label(s) of the internal NDFrame """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] @property def ndim(self) -> int: """ Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- ndarray.ndim : Number of array dimensions. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.ndim 1 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 """ return self._mgr.ndim @property def size(self) -> int: """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.size 3 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 """ return np.prod(self.shape) @overload def set_axis( self: NDFrameT, labels, axis: Axis = ..., inplace: Literal[False] = ... ) -> NDFrameT: ... @overload def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None: ... @overload def set_axis(self, labels, *, inplace: Literal[True]) -> None: ... @overload def set_axis( self: NDFrameT, labels, axis: Axis = ..., inplace: bool_t = ... ) -> NDFrameT | None: ... def set_axis(self, labels, axis: Axis = 0, inplace: bool_t = False): """ Assign desired index to given axis. Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : %(axes_single_arg)s, default 0 The axis to update. The value 0 identifies the rows%(axis_description_sub)s. inplace : bool, default False Whether to return a new %(klass)s instance. Returns ------- renamed : %(klass)s or None An object of type %(klass)s or None if ``inplace=True``. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ self._check_inplace_and_allows_duplicate_labels(inplace) return self._set_axis_nocheck(labels, axis, inplace) @final def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool_t): # NDFrame.rename with inplace=False calls set_axis(inplace=True) on a copy. if inplace: setattr(self, self._get_axis_name(axis), labels) else: obj = self.copy() obj.set_axis(labels, axis=axis, inplace=True) return obj def _set_axis(self, axis: int, labels: Index) -> None: labels = ensure_index(labels) self._mgr.set_axis(axis, labels) self._clear_item_cache() @final def swapaxes(self: NDFrameT, axis1, axis2, copy=True) -> NDFrameT: """ Interchange axes and swap values axes appropriately. 
Returns ------- y : same as input """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) if i == j: if copy: return self.copy() return self mapping = {i: j, j: i} new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)) new_values = self.values.swapaxes(i, j) if copy: new_values = new_values.copy() return self._constructor( new_values, *new_axes, ).__finalize__(self, method="swapaxes") @final @doc(klass=_shared_doc_kwargs["klass"]) def droplevel(self: NDFrameT, level, axis=0) -> NDFrameT: """ Return {klass} with requested index / column level(s) removed. Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Axis along which the level(s) is removed: * 0 or 'index': remove level(s) in column. * 1 or 'columns': remove level(s) in row. Returns ------- {klass} {klass} with requested index / column level(s) removed. Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) return self.set_axis(new_labels, axis=axis, inplace=False) def pop(self, item: Hashable) -> Series | Any: result = self[item] del self[item] return result @final def squeeze(self, axis=None): """ Squeeze 1 dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame. Examples -------- >>> primes = pd.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. 
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_0a = df.loc[df.index < 1, ['a']] >>> df_0a a 0 1 Squeezing the rows produces a single scalar Series: >>> df_0a.squeeze('rows') a 1 Name: 0, dtype: int64 Squeezing all axes will project directly into a scalar: >>> df_0a.squeeze() 1 """ axis = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) return self.iloc[ tuple( 0 if i in axis and len(a) == 1 else slice(None) for i, a in enumerate(self.axes) ) ] # ---------------------------------------------------------------------- # Rename def _rename( self: NDFrameT, mapper: Renamer | None = None, *, index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, copy: bool_t = True, inplace: bool_t = False, level: Level | None = None, errors: str = "ignore", ) -> NDFrameT | None: """ Alter axes input function or functions. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value (Series only). Parameters ---------- %(axes)s : scalar, list-like, dict-like or function, optional Scalar or list-like will alter the ``Series.name`` attribute, and raise on DataFrame. dict-like or functions are transformations to apply to that axis' values copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new {klass}. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- renamed : {klass} (new object) Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- NDFrame.rename_axis Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 Since ``DataFrame`` doesn't have a ``.name`` attribute, only mapping-type arguments are allowed. >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(2) Traceback (most recent call last): ... TypeError: 'int' object is not callable ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. 
>>> df.rename(index=str, columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}) a B 0 1 4 1 2 5 2 3 6 Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 See the :ref:`user guide <basics.rename>` for more. """ if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") if index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) elif mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) else: # use the mapper argument if axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=copy) for axis_no, replacements in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) f = com.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) # GH 13473 if not callable(replacements): if ax._is_multi and level is not None: indexer = ax.get_level_values(level).get_indexer_for(replacements) else: indexer = ax.get_indexer_for(replacements) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(replacements) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") new_index = ax._transform_index(f, level=level) result._set_axis_nocheck(new_index, axis=axis_no, inplace=True) result._clear_item_cache() if inplace: self._update_inplace(result) return None else: return result.__finalize__(self, method="rename") @rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)]) def rename_axis(self, mapper=lib.no_default, **kwargs): """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or function transformation to apply to that axis' values. Note that the ``columns`` parameter is not allowed if the object is a Series. This parameter applies only to DataFrame objects. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. copy : bool, default True Also copy underlying data. inplace : bool, default False Modifies the object directly, instead of creating a new Series or DataFrame. Returns ------- Series, DataFrame, or None The same type as the caller or None if ``inplace=True``. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. In this case, the parameter ``copy`` is ignored. The second calling convention will modify the names of the corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the deprecated behavior of modifying the axis *labels*. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- **Series** >>> s = pd.Series(["dog", "cat", "monkey"]) >>> s 0 dog 1 cat 2 monkey dtype: object >>> s.rename_axis("animal") animal 0 dog 1 cat 2 monkey dtype: object **DataFrame** >>> df = pd.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal") >>> df num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns") >>> df limbs num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 **MultiIndex** >>> df.index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... names=['type', 'name']) >>> df limbs num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}) limbs num_legs num_arms class name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(columns=str.upper) LIMBS num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 """ axes, kwargs = self._construct_axes_from_arguments( (), kwargs, sentinel=lib.no_default ) copy = kwargs.pop("copy", True) inplace = kwargs.pop("inplace", False) axis = kwargs.pop("axis", 0) if axis is not None: axis = self._get_axis_number(axis) if kwargs: raise TypeError( "rename_axis() got an unexpected keyword " f'argument "{list(kwargs.keys())[0]}"' ) inplace = validate_bool_kwarg(inplace, "inplace") if mapper is not lib.no_default: # Use v0.23 behavior if a scalar or list non_mapper = is_scalar(mapper) or ( is_list_like(mapper) and not is_dict_like(mapper) ) if non_mapper: return self._set_axis_name(mapper, axis=axis, inplace=inplace) else: raise ValueError("Use `.rename` to alter labels with a mapper.") else: # Use new behavior. Means that index and/or columns # is specified result = self if inplace else self.copy(deep=copy) for axis in range(self._AXIS_LEN): v = axes.get(self._get_axis_name(axis)) if v is lib.no_default: continue non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) if non_mapper: newnames = v else: f = com.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True) if not inplace: return result @final def _set_axis_name(self, name, axis=0, inplace=False): """ Set the name(s) of the axis. Parameters ---------- name : str or list of str Name(s) to set. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to set the label. The value 0 or 'index' specifies index, and the value 1 or 'columns' specifies columns. inplace : bool, default False If `True`, do operation inplace and return None. Returns ------- Series, DataFrame, or None The same type as the caller or `None` if `inplace` is `True`. See Also -------- DataFrame.rename : Alter the axis labels of :class:`DataFrame`. Series.rename : Alter the index labels or set the index name of :class:`Series`. Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`. Examples -------- >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs dog 4 cat 4 monkey 2 >>> df._set_axis_name("animal") num_legs animal dog 4 cat 4 monkey 2 >>> df.index = pd.MultiIndex.from_product( ... 
[["mammal"], ['dog', 'cat', 'monkey']]) >>> df._set_axis_name(["type", "name"]) num_legs type name mammal dog 4 cat 4 monkey 2 """ axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) inplace = validate_bool_kwarg(inplace, "inplace") renamed = self if inplace else self.copy() renamed.set_axis(idx, axis=axis, inplace=True) if not inplace: return renamed # ---------------------------------------------------------------------- # Comparison Methods @final def _indexed_same(self, other) -> bool_t: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) @final def equals(self, other: object) -> bool_t: """ Test whether two objects contain the same elements. This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in the same location are considered equal. The row/column index do not need to have the same type, as long as the values are considered equal. Corresponding columns must be of the same dtype. Parameters ---------- other : Series or DataFrame The other Series or DataFrame to be compared with the first. Returns ------- bool True if all elements are the same in both objects, False otherwise. See Also -------- Series.eq : Compare two Series objects of the same length and return a Series where each element is True if the element in each Series is equal, False otherwise. DataFrame.eq : Compare two DataFrame objects of the same shape and return a DataFrame where each element is True if the respective element in each DataFrame is equal, False otherwise. testing.assert_series_equal : Raises an AssertionError if left and right are not equal. Provides an easy interface to ignore inequality in dtypes, indexes and precision among others. testing.assert_frame_equal : Like assert_series_equal, but targets DataFrames. numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) >>> df 1 2 0 10 20 DataFrames df and exactly_equal have the same types and values for their elements and column labels, which will return True. >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) >>> exactly_equal 1 2 0 10 20 >>> df.equals(exactly_equal) True DataFrames df and different_column_type have the same element types and values, but have different types for the column labels, which will still return True. >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) >>> different_column_type 1.0 2.0 0 10 20 >>> df.equals(different_column_type) True DataFrames df and different_data_type have different types for the same values for their elements, and will return False even though their column labels are the same values and types. 
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) >>> different_data_type 1 2 0 10.0 20.0 >>> df.equals(different_data_type) False """ if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- # Unary Methods @final def __neg__(self): def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return operator.inv(values) else: return operator.neg(values) new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__neg__") @final def __pos__(self): def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return values.copy() else: return operator.pos(values) new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__pos__") @final def __invert__(self): if not self.size: # inv fails with 0 len return self new_data = self._mgr.apply(operator.invert) return self._constructor(new_data).__finalize__(self, method="__invert__") @final def __nonzero__(self): raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ @final def bool(self): """ Return the bool of a single element Series or DataFrame. This must be a boolean scalar value, either True or False. It will raise a ValueError if the Series or DataFrame does not have exactly 1 element, or that element is not boolean (integer values 0 and 1 will also raise an exception). Returns ------- bool The value in the Series or DataFrame. See Also -------- Series.astype : Change the data type of a Series, including to boolean. DataFrame.astype : Change the data type of a DataFrame, including to boolean. numpy.bool_ : NumPy boolean data type, used by pandas for boolean values. Examples -------- The method will only work for single element objects with a boolean value: >>> pd.Series([True]).bool() True >>> pd.Series([False]).bool() False >>> pd.DataFrame({'col': [True]}).bool() True >>> pd.DataFrame({'col': [False]}).bool() False """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) elif is_scalar(v): raise ValueError( "bool cannot act on a non-boolean single element " f"{type(self).__name__}" ) self.__nonzero__() @final def abs(self: NDFrameT) -> NDFrameT: """ Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... 
}) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50 """ res_mgr = self._mgr.apply(np.abs) return self._constructor(res_mgr).__finalize__(self, name="abs") @final def __abs__(self: NDFrameT) -> NDFrameT: return self.abs() @final def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT: return self.round(decimals).__finalize__(self, method="__round__") # ------------------------------------------------------------------------- # Label or Level Combination Helpers # # A collection of helper methods for DataFrame/Series operations that # accept a combination of column/index labels and levels. All such # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. @final def _is_level_reference(self, key, axis=0): """ Test whether a key is a level reference for a given axis. To be considered a level reference, `key` must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. Parameters ---------- key : str Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool """ axis = self._get_axis_number(axis) return ( key is not None and is_hashable(key) and key in self.axes[axis].names and not self._is_label_reference(key, axis=axis) ) @final def _is_label_reference(self, key, axis=0) -> bool_t: """ Test whether a key is a label reference for a given axis. To be considered a label reference, `key` must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key : str Potential label name axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool """ axis = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis) return ( key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes) ) @final def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, `key` must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : str Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool """ return self._is_level_reference(key, axis=axis) or self._is_label_reference( key, axis=axis ) @final def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None: """ Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key : str or object Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). 
Raises ------ ValueError: `key` is ambiguous """ axis = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis) if ( key is not None and is_hashable(key) and key in self.axes[axis].names and any(key in self.axes[ax] for ax in other_axes) ): # Build an informative and grammatical warning level_article, level_type = ( ("an", "index") if axis == 0 else ("a", "column") ) label_article, label_type = ( ("a", "column") if axis == 0 else ("an", "index") ) msg = ( f"'{key}' is both {level_article} {level_type} level and " f"{label_article} {label_type} label, which is ambiguous." ) raise ValueError(msg) @final def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray: """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key : str Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- values : np.ndarray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels FutureWarning if `key` is ambiguous. This will become an ambiguity error in a future version """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex): multi_message = ( "\n" "For a multi-index, the label must be a " "tuple with elements corresponding to each level." ) else: multi_message = "" label_axis_name = "column" if axis == 0 else "index" raise ValueError( f"The {label_axis_name} label '{key}' is not unique.{multi_message}" ) return values @final def _drop_labels_or_levels(self, keys, axis: int = 0): """ Drop labels and/or levels for the given `axis`. For each key in `keys`: - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. Parameters ---------- keys : str or list of str labels or levels to drop axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any `keys` match neither a label nor a level """ axis = self._get_axis_number(axis) # Validate keys keys = com.maybe_make_list(keys) invalid_keys = [ k for k in keys if not self._is_label_or_level_reference(k, axis=axis) ] if invalid_keys: raise ValueError( "The following keys are not valid labels or " f"levels for axis {axis}: {invalid_keys}" ) # Compute levels and labels to drop levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] # Perform copy upfront and then use inplace operations below. # This ensures that we always perform exactly one copy. 
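# Illustrative sketch with hypothetical names (not taken from the pandas test suite): for a frame indexed by a level named 'year' that also holds a column 'city', ``df._drop_labels_or_levels(['year', 'city'])`` resets and discards the 'year' index level and drops the 'city' column, via the two axis == 0 branches below.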
# ``copy`` and/or ``inplace`` options could be added in the future. dropped = self.copy() if axis == 0: # Handle dropping index levels if levels_to_drop: dropped.reset_index(levels_to_drop, drop=True, inplace=True) # Handle dropping column labels if labels_to_drop: dropped.drop(labels_to_drop, axis=1, inplace=True) else: # Handle dropping column levels if levels_to_drop: if isinstance(dropped.columns, MultiIndex): # Drop the specified levels from the MultiIndex dropped.columns = dropped.columns.droplevel(levels_to_drop) else: # Drop the last level of Index by replacing with # a RangeIndex dropped.columns = RangeIndex(dropped.columns.size) # Handle dropping index labels if labels_to_drop: dropped.drop(labels_to_drop, axis=0, inplace=True) return dropped # ---------------------------------------------------------------------- # Iteration # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: None  # type: ignore[assignment] def __iter__(self): """ Iterate over info axis. Returns ------- iterator Info axis as iterator. """ return iter(self._info_axis) def keys(self): """ Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. Returns ------- Index Info axis. """ return self._info_axis def items(self): """ Iterate over (label, values) pairs on the info axis. This is the index for Series and the columns for DataFrame. Returns ------- Generator """ for h in self._info_axis: yield h, self[h] def __len__(self) -> int: """Returns length of info axis""" return len(self._info_axis) @final def __contains__(self, key) -> bool_t: """True if the key is in the info axis""" return key in self._info_axis @property def empty(self) -> bool_t: """ Indicator whether Series/DataFrame is empty. True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. Returns ------- bool If Series/DataFrame is empty, return True, if not return False. See Also -------- Series.dropna : Return series without null values. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. Notes ----- If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True >>> ser_empty = pd.Series({'A' : []}) >>> ser_empty A [] dtype: object >>> ser_empty.empty False >>> ser_empty = pd.Series() >>> ser_empty.empty True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) # ---------------------------------------------------------------------- # Array Interface # This is also set in IndexOpsMixin # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented __array_priority__ = 1000 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: return np.asarray(self._values, dtype=dtype) def __array_wrap__( self, result: np.ndarray, context: tuple[Callable, tuple[Any, ...], int] | None = None, ): """ Gets called after a ufunc and other functions.
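Re-wraps the result of a ufunc or comparable function in the original pandas container, reusing this object's axes; this summary is inferred from the implementation below, which is deprecated and retained mainly for dask compatibility.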
Parameters ---------- result: np.ndarray The result of the ufunc or other function called on the NumPy array returned by __array__. context: tuple of (func, tuple, int) This parameter is returned by ufuncs as a 3-element tuple: (name of the ufunc, arguments of the ufunc, domain of the ufunc), but is not set by other numpy functions. Notes ----- Series implements ``__array_ufunc__``, so this is not called for ufuncs on Series. """ # Note: at time of dask 2022.01.0, this is still used by dask warnings.warn( "The __array_wrap__ method of DataFrame and Series will be removed in " "a future version", DeprecationWarning, stacklevel=2, ) res = lib.item_from_zerodim(result) if is_scalar(res): # e.g. we get here with np.ptp(series) # ptp also requires the item_from_zerodim return res d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) return self._constructor(res, **d).__finalize__(self, method="__array_wrap__") @final def __array_ufunc__( self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any ): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) # ---------------------------------------------------------------------- # Picklability @final def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return { "_mgr": self._mgr, "_typ": self._typ, "_metadata": self._metadata, "attrs": self.attrs, "_flags": {k: self.flags[k] for k in self.flags._keys}, **meta, } @final def __setstate__(self, state): if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): if "_data" in state and "_mgr" not in state: # compat for older pickles state["_mgr"] = state.pop("_data") typ = state.get("_typ") if typ is not None: attrs = state.get("_attrs", {}) object.__setattr__(self, "_attrs", attrs) flags = state.get("_flags", {"allows_duplicate_labels": True}) object.__setattr__(self, "_flags", Flags(self, **flags)) # set in the order of internal names # to avoid definitional recursion # e.g. say fill_value needing _mgr to be # defined meta = set(self._internal_names + self._metadata) for k in list(meta): if k in state and k != "_flags": v = state[k] object.__setattr__(self, k, v) for k, v in state.items(): if k not in meta: object.__setattr__(self, k, v) else: raise NotImplementedError("Pre-0.12 pickles are no longer supported") elif len(state) == 2: raise NotImplementedError("Pre-0.12 pickles are no longer supported") self._item_cache = {} # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: # string representation based upon iterating over self # (since, by definition, `PandasContainers` are iterable) prepr = f"[{','.join(map(pprint_thing, self))}]" return f"{type(self).__name__}({prepr})" @final def _repr_latex_(self): """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). """ if config.get_option("display.latex.repr"): return self.to_latex() else: return None @final def _repr_data_resource_(self): """ Not a real Jupyter special repr method, but we use the same naming convention.
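When the ``display.html.table_schema`` option is enabled, the body below round-trips the head of the data through ``to_json(orient="table")`` and returns the parsed Table Schema payload for schema-aware frontends.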
""" if config.get_option("display.html.table_schema"): data = self.head(config.get_option("display.max_rows")) as_json = data.to_json(orient="table") as_json = cast(str, as_json) return json.loads(as_json, object_pairs_hook=collections.OrderedDict) # ---------------------------------------------------------------------- # I/O Methods @final @doc(klass="object", storage_options=_shared_docs["storage_options"]) def to_excel( self, excel_writer, sheet_name: str = "Sheet1", na_rep: str = "", float_format: str | None = None, columns=None, header=True, index=True, index_label=None, startrow=0, startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep="inf", verbose=True, freeze_panes=None, storage_options: StorageOptions = None, ) -> None: """ Write {klass} to an Excel sheet. To write a single {klass} to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an `ExcelWriter` object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique `sheet_name`. With all data written to the file it is necessary to save the changes. Note that creating an `ExcelWriter` object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example ``float_format="%.2f"`` will format 0.1234 to 0.12. columns : sequence or list of str, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of string is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, optional Column label for index column(s) if desired. If not specified, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. startrow : int, default 0 Upper left cell row to dump data frame. startcol : int, default 0 Upper left cell column to dump data frame. engine : str, optional Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. .. deprecated:: 1.2.0 As the `xlwt <https://pypi.org/project/xlwt/>`__ package is no longer maintained, the ``xlwt`` engine will be removed in a future version of pandas. merge_cells : bool, default True Write MultiIndex and Hierarchical Rows as merged cells. encoding : str, optional Encoding of the resulting excel file. Only necessary for xlwt, other writers support unicode natively. inf_rep : str, default 'inf' Representation for infinity (there is no native representation for infinity in Excel). verbose : bool, default True Display more information in the error logs. freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. {storage_options} .. versionadded:: 1.2.0 See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. ExcelWriter : Class for writing DataFrame objects into excel sheets. read_excel : Read an Excel file into a pandas DataFrame. read_csv : Read a comma-separated values (csv) file into DataFrame. 
Notes ----- For compatibility with :meth:`~DataFrame.to_csv`, to_excel serializes lists and dicts to strings before writing. Once a workbook has been saved it is not possible to write further data without rewriting the whole workbook. Examples -------- Create, write to and save a workbook: >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df1.to_excel("output.xlsx") # doctest: +SKIP To specify the sheet name: >>> df1.to_excel("output.xlsx", ... sheet_name='Sheet_name_1') # doctest: +SKIP If you wish to write to more than one sheet in the workbook, it is necessary to specify an ExcelWriter object: >>> df2 = df1.copy() >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') ExcelWriter can also be used to append to an existing Excel file: >>> with pd.ExcelWriter('output.xlsx', ... mode='a') as writer: # doctest: +SKIP ... df.to_excel(writer, sheet_name='Sheet_name_3') To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter( df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep, ) formatter.write( excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, ) @final @doc( storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_json( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, orient: str | None = None, date_format: str | None = None, double_precision: int = 10, force_ascii: bool_t = True, date_unit: str = "ms", default_handler: Callable[[Any], JSONSerializable] | None = None, lines: bool_t = False, compression: CompressionOptions = "infer", index: bool_t = True, indent: int | None = None, storage_options: StorageOptions = None, ) -> str | None: """ Convert the object to a JSON string. Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. orient : str Indication of expected JSON string format. * Series: - default is 'index' - allowed values are: {{'split', 'records', 'index', 'table'}}. * DataFrame: - default is 'columns' - allowed values are: {{'split', 'records', 'index', 'columns', 'values', 'table'}}. * The format of the JSON string: - 'split' : dict like {{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}} - 'records' : list like [{{column -> value}}, ... , {{column -> value}}] - 'index' : dict like {{index -> {{column -> value}}}} - 'columns' : dict like {{column -> {{index -> value}}}} - 'values' : just the values array - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}} Describing the data, where data component is like ``orient='records'``. 
date_format : {{None, 'epoch', 'iso'}} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, the default is 'epoch'. double_precision : int, default 10 The number of decimal places to use when encoding floating point values. force_ascii : bool, default True Force encoded string to be ASCII. date_unit : str, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, microsecond, and nanosecond respectively. default_handler : callable, default None Handler to call if object cannot otherwise be converted to a suitable format for JSON. Should receive a single argument which is the object to convert and return a serialisable object. lines : bool, default False If 'orient' is 'records' write out line-delimited json format. Will throw ValueError if incorrect 'orient' since others are not list-like. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. index : bool, default True Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. indent : int, optional Length of whitespace used to indent each record. .. versionadded:: 1.0.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting json format as a string. Otherwise returns None. See Also -------- read_json : Convert a JSON string to pandas object. Notes ----- The behavior of ``indent=0`` varies from the stdlib, which does not indent the output but does insert newlines. Currently, ``indent=0`` and the default ``indent=None`` are equivalent in pandas, though this may change in a future release. ``orient='table'`` contains a 'pandas_version' field under 'schema'. This stores the version of `pandas` used in the latest revision of the schema. Examples -------- >>> import json >>> df = pd.DataFrame( ... [["a", "b"], ["c", "d"]], ... index=["row 1", "row 2"], ... columns=["col 1", "col 2"], ... ) >>> result = df.to_json(orient="split") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP {{ "columns": [ "col 1", "col 2" ], "index": [ "row 1", "row 2" ], "data": [ [ "a", "b" ], [ "c", "d" ] ] }} Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. 
>>> result = df.to_json(orient="records") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP [ {{ "col 1": "a", "col 2": "b" }}, {{ "col 1": "c", "col 2": "d" }} ] Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> result = df.to_json(orient="index") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP {{ "row 1": {{ "col 1": "a", "col 2": "b" }}, "row 2": {{ "col 1": "c", "col 2": "d" }} }} Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: >>> result = df.to_json(orient="columns") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP {{ "col 1": {{ "row 1": "a", "row 2": "c" }}, "col 2": {{ "row 1": "b", "row 2": "d" }} }} Encoding/decoding a Dataframe using ``'values'`` formatted JSON: >>> result = df.to_json(orient="values") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP [ [ "a", "b" ], [ "c", "d" ] ] Encoding with Table Schema: >>> result = df.to_json(orient="table") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP {{ "schema": {{ "fields": [ {{ "name": "index", "type": "string" }}, {{ "name": "col 1", "type": "string" }}, {{ "name": "col 2", "type": "string" }} ], "primaryKey": [ "index" ], "pandas_version": "1.4.0" }}, "data": [ {{ "index": "row 1", "col 1": "a", "col 2": "b" }}, {{ "index": "row 2", "col 1": "c", "col 2": "d" }} ] }} """ from pandas.io import json if date_format is None and orient == "table": date_format = "iso" elif date_format is None: date_format = "epoch" config.is_nonnegative_int(indent) indent = indent or 0 return json.to_json( path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, lines=lines, compression=compression, index=index, indent=indent, storage_options=storage_options, ) @final def to_hdf( self, path_or_buf, key: str, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool_t = False, format: str | None = None, index: bool_t = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool_t | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None: """ Write the contained data to an HDF5 file using HDFStore. Hierarchical Data Format (HDF) is self-describing, allowing an application to interpret the structure and contents of a file with no outside information. One HDF file can hold a mix of related objects which can be accessed as a group or as individual objects. In order to add another DataFrame or Series to an existing HDF file please use append mode and a different key. .. warning:: One can store a subclass of ``DataFrame`` or ``Series`` to HDF5, but the type of the subclass is lost upon storing. For more information see the :ref:`user guide <io.hdf5>`. Parameters ---------- path_or_buf : str or pandas.HDFStore File path or HDFStore object. key : str Identifier for the group in the store. mode : {'a', 'w', 'r+'}, default 'a' Mode to open file: - 'w': write, a new file is created (an existing file with the same name would be deleted). - 'a': append, an existing file is opened for reading and writing, and if the file does not exist it is created. - 'r+': similar to 'a', but the file must already exist. complevel : {0-9}, default None Specifies a compression level for data.
A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available raises a ValueError. append : bool, default False For Table formats, append the input data to the existing table. format : {'fixed', 'table', None}, default 'fixed' Possible values: - 'fixed': Fixed format. Fast writing/reading. Not-appendable, nor searchable. - 'table': Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. - If None, pd.get_option('io.hdf.default_format') is checked, followed by fallback to "fixed". errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. encoding : str, default "UTF-8" min_itemsize : dict or int, optional Map column names to minimum string sizes for columns. nan_rep : Any, optional How to represent null values as str. Not allowed with append=True. data_columns : list of columns or True, optional List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See :ref:`io.hdf5-query-data-columns`. Applicable only to format='table'. See Also -------- read_hdf : Read from HDF file. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. DataFrame.to_sql : Write to a SQL table. DataFrame.to_feather : Write out feather-format for DataFrames. DataFrame.to_csv : Write out to a csv file. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, ... index=['a', 'b', 'c']) # doctest: +SKIP >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP We can add another object to the same file: >>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP Reading from HDF file: >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP A B a 1 4 b 2 5 c 3 6 >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP 0 1 1 2 2 3 3 4 dtype: int64 """ from pandas.io import pytables # Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected # "Union[DataFrame, Series]" [arg-type] pytables.to_hdf( path_or_buf, key, self, # type: ignore[arg-type] mode=mode, complevel=complevel, complib=complib, append=append, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding, ) @final def to_sql( self, name: str, con, schema=None, if_exists: str = "fail", index: bool_t = True, index_label=None, chunksize=None, dtype: DtypeArg | None = None, method=None, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Databases supported by SQLAlchemy [1]_ are supported. Tables can be newly created, appended to, or overwritten. Parameters ---------- name : str Name of SQL table. con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that library. Legacy support is provided for sqlite3.Connection objects.
The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See `here \ <https://docs.sqlalchemy.org/en/13/core/connections.html>`_. schema : str, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' How to behave if the table already exists. * fail: Raise a ValueError. * replace: Drop the table before inserting new values. * append: Insert new values to the existing table. index : bool, default True Write DataFrame index as a column. Uses `index_label` as the column name in the table. index_label : str or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. Returns ------- None or int Number of rows affected by to_sql. None is returned if the callable passed into ``method`` does not return the number of rows. The number of returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable which may not reflect the exact number of written rows as stipulated in the `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or `SQLAlchemy <https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.BaseCursorResult.rowcount>`__ documentation. .. versionadded:: 1.4.0 Raises ------ ValueError When the table already exists and `if_exists` is 'fail' (the default). See Also -------- read_sql : Read a DataFrame from a table. Notes ----- Timezone aware datetime columns will be written as ``Timestamp with timezone`` type with SQLAlchemy if supported by the database. Otherwise, the datetimes will be stored as timezone unaware timestamps local to the original timezone. References ---------- .. [1] https://docs.sqlalchemy.org .. [2] https://www.python.org/dev/peps/pep-0249/ Examples -------- Create an in-memory SQLite database. >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite://', echo=False) Create a table from scratch with 3 rows. >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) >>> df name 0 User 1 1 User 2 2 User 3 >>> df.to_sql('users', con=engine) 3 >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] An `sqlalchemy.engine.Connection` can also be passed to `con`: >>> with engine.begin() as connection: ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) ... df1.to_sql('users', con=connection, if_exists='append') 2 This is allowed to support operations that require that the same DBAPI connection is used for the entire operation.
>>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']}) >>> df2.to_sql('users', con=engine, if_exists='append') 2 >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), (0, 'User 4'), (1, 'User 5'), (0, 'User 6'), (1, 'User 7')] Overwrite the table with just ``df2``. >>> df2.to_sql('users', con=engine, if_exists='replace', ... index_label='id') 2 >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 6'), (1, 'User 7')] Specify the dtype (especially useful for integers with missing values). Notice that while pandas is forced to store the data as floating point, the database supports nullable integers. When fetching the data with Python, we get back integer scalars. >>> df = pd.DataFrame({"A": [1, None, 2]}) >>> df A 0 1.0 1 NaN 2 2.0 >>> from sqlalchemy.types import Integer >>> df.to_sql('integers', con=engine, index=False, ... dtype={"A": Integer()}) 3 >>> engine.execute("SELECT * FROM integers").fetchall() [(1,), (None,), (2,)] """ # noqa:E501 from pandas.io import sql return sql.to_sql( self, name, con, schema=schema, if_exists=if_exists, index=index, index_label=index_label, chunksize=chunksize, dtype=dtype, method=method, ) @final @doc( storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_pickle( self, path: FilePath | WriteBuffer[bytes], compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, ) -> None: """ Pickle (serialize) object to file. Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. File path where the pickled object will be stored. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values are 0, 1, 2, 3, 4, 5. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html. {storage_options} .. versionadded:: 1.2.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ # noqa: E501 from pandas.io.pickle import to_pickle to_pickle( self, path, compression=compression, protocol=protocol, storage_options=storage_options, ) @final def to_clipboard( self, excel: bool_t = True, sep: str | None = None, **kwargs ) -> None: r""" Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool, default True Produce output in a csv format for easy pasting into excel. - True, use the provided separator for csv pasting. - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. 
**kwargs These parameters will be passed to DataFrame.to_csv. See Also -------- DataFrame.to_csv : Write a DataFrame to a comma-separated values (csv) file. read_clipboard : Read text from clipboard and pass to read_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `PyQt4` modules) - Windows : none - macOS : none This method uses the processes developed for the package `pyperclip`. A solution to render any output string format is given in the examples. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 Using the original `pyperclip` package for any string output format. .. code-block:: python import pyperclip html = df.style.to_html() pyperclip.copy(html) """ from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) @final def to_xarray(self): """ Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <https://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', ... 'falcon', 'parrot'], ... 
'speed': [350, 18, 361, 15]}) >>> df_multiindex = df_multiindex.set_index(['date', 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (animal: 2, date: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ xarray = import_optional_dependency("xarray") if self.ndim == 1: return xarray.DataArray.from_series(self) else: return xarray.Dataset.from_dataframe(self) @final @doc(returns=fmt.return_docstring) def to_latex( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal=".", multicolumn=None, multicolumn_format=None, multirow=None, caption=None, label=None, position=None, ): r""" Render object to a LaTeX tabular, longtable, or nested table. Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted into a main LaTeX document or read from an external file with ``\input{{table.tex}}``. .. versionchanged:: 1.0.0 Added caption and label arguments. .. versionchanged:: 1.2.0 Added position argument, changed meaning of caption argument. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {{str: function}}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function or str, optional, default None Formatter for floating point numbers. For example ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will both result in 0.1234 being formatted as 0.12. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a \usepackage{{longtable}} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special characters in column names. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. decimal : str, default '.' 
Character recognized as decimal separator, e.g. ',' in Europe. multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module. multicolumn_format : str, default 'l' The alignment for multicolumns, similar to `column_format` The default will be read from the config module. multirow : bool, default False Use \multirow to enhance MultiIndex rows. Requires adding a \usepackage{{multirow}} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. caption : str or tuple, optional Tuple (full_caption, short_caption), which results in ``\caption[short_caption]{{full_caption}}``; if a single string is passed, no short caption will be set. .. versionadded:: 1.0.0 .. versionchanged:: 1.2.0 Optionally allow caption to be a tuple ``(full_caption, short_caption)``. label : str, optional The LaTeX label to be placed inside ``\label{{}}`` in the output. This is used with ``\ref{{}}`` in the main ``.tex`` file. .. versionadded:: 1.0.0 position : str, optional The LaTeX positional argument for tables, to be placed after ``\begin{{}}`` in the output. .. versionadded:: 1.2.0 {returns} See Also -------- Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Examples -------- >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], ... mask=['red', 'purple'], ... weapon=['sai', 'bo staff'])) >>> print(df.to_latex(index=False)) # doctest: +SKIP \begin{{tabular}}{{lll}} \toprule name & mask & weapon \\ \midrule Raphael & red & sai \\ Donatello & purple & bo staff \\ \bottomrule \end{{tabular}} """ msg = ( "In future versions `DataFrame.to_latex` is expected to utilise the base " "implementation of `Styler.to_latex` for formatting and rendering. " "The arguments signature may therefore change. It is recommended instead " "to use `DataFrame.style.to_latex` which also contains additional " "functionality." 
) warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) # Get defaults from the pandas config if self.ndim == 1: self = self.to_frame() if longtable is None: longtable = config.get_option("display.latex.longtable") if escape is None: escape = config.get_option("display.latex.escape") if multicolumn is None: multicolumn = config.get_option("display.latex.multicolumn") if multicolumn_format is None: multicolumn_format = config.get_option("display.latex.multicolumn_format") if multirow is None: multirow = config.get_option("display.latex.multirow") self = cast("DataFrame", self) formatter = DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, index_names=index_names, escape=escape, decimal=decimal, ) return DataFrameRenderer(formatter).to_latex( buf=buf, column_format=column_format, longtable=longtable, encoding=encoding, multicolumn=multicolumn, multicolumn_format=multicolumn_format, multirow=multirow, caption=caption, label=label, position=position, ) @final @doc( storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"], ) @deprecate_kwarg(old_arg_name="line_terminator", new_arg_name="lineterminator") def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, sep: str = ",", na_rep: str = "", float_format: str | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | list[str] = True, index: bool_t = True, index_label: IndexLabel | None = None, mode: str = "w", encoding: str | None = None, compression: CompressionOptions = "infer", quoting: int | None = None, quotechar: str = '"', lineterminator: str | None = None, chunksize: int | None = None, date_format: str | None = None, doublequote: bool_t = True, escapechar: str | None = None, decimal: str = ".", errors: str = "strict", storage_options: StorageOptions = None, ) -> str | None: r""" Write object to a comma-separated values (csv) file. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. If a non-binary file object is passed, it should be opened with `newline=''`, disabling universal newlines. If a binary file object is passed, `mode` might need to contain a `'b'`. .. versionchanged:: 1.2.0 Support for binary file objects was introduced. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, default None Format string for floating point numbers. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str Python write mode, default 'w'. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. 
`encoding` is not supported if `path_or_buf` is a non-binary file object. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other entries as additional compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'. .. versionchanged:: 1.2.0 Compression is supported for binary file objects. .. versionchanged:: 1.2.0 Previous versions forwarded dict entries for 'gzip' to `gzip.open` instead of `gzip.GzipFile` which prevented setting `mtime`. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. lineterminator : str, optional The newline character or character sequence to use in the output file. Defaults to `os.linesep`, which depends on the OS in which this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.). .. versionchanged:: 1.5.0 Previously was line_terminator, changed for consistency with read_csv and the standard library 'csv' module. chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv' >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ... 
compression=compression_opts) # doctest: +SKIP To write a csv file to a new folder or nested folder you will first need to create it using either Pathlib or os: >>> from pathlib import Path # doctest: +SKIP >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP >>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP >>> df.to_csv(filepath) # doctest: +SKIP >>> import os # doctest: +SKIP >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() formatter = DataFrameFormatter( frame=df, header=header, index=index, na_rep=na_rep, float_format=float_format, decimal=decimal, ) return DataFrameRenderer(formatter).to_csv( path_or_buf, lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, columns=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options, ) # ---------------------------------------------------------------------- # Lookup Caching def _reset_cacher(self) -> None: """ Reset the cacher. """ raise AbstractMethodError(self) def _maybe_update_cacher( self, clear: bool_t = False, verify_is_copy: bool_t = True, inplace: bool_t = False, ) -> None: """ See if we need to update our parent cacher if clear, then clear our cache. Parameters ---------- clear : bool, default False Clear the item cache. verify_is_copy : bool, default True Provide is_copy checks. """ if verify_is_copy: self._check_setitem_copy(t="referent") if clear: self._clear_item_cache() def _clear_item_cache(self) -> None: raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Indexing Methods def take( self: NDFrameT, indices, axis=0, is_copy: bool_t | None = None, **kwargs ) -> NDFrameT: """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. is_copy : bool Before pandas 1.0, ``is_copy=False`` can be specified to ensure that the return value is an actual copy. Starting with pandas 1.0, ``take`` always returns a copy, and the keyword is therefore deprecated. .. deprecated:: 1.0.0 **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). 
Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ if is_copy is not None: warnings.warn( "is_copy is deprecated and will be removed in a future version. " "'take' always returns a copy, so there is no need to specify this.", FutureWarning, stacklevel=find_stack_level(), ) nv.validate_take((), kwargs) return self._take(indices, axis) def _take( self: NDFrameT, indices, axis=0, convert_indices: bool_t = True, ) -> NDFrameT: """ Internal version of the `take` allowing specification of additional args. See the docstring of `take` for full explanation of the parameters. """ self._consolidate_inplace() new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True, convert_indices=convert_indices, ) return self._constructor(new_data).__finalize__(self, method="take") def _take_with_is_copy(self: NDFrameT, indices, axis=0) -> NDFrameT: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). See the docstring of `take` for full explanation of the parameters. """ result = self._take(indices=indices, axis=axis) # Maybe set copy if we didn't actually change the index. if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) return result @final def xs(self, key, axis=0, level=None, drop_level: bool_t = True): """ Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` can not be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 
'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog')) num_legs num_wings locomotion walks 4 0 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... level=[0, 'locomotion']) num_legs num_wings animal penguin 2 2 Get values at specified column and axis >>> df.xs('num_wings', axis=1) class animal locomotion mammal cat walks 0 dog walks 0 bat flies 2 bird penguin walks 2 Name: num_wings, dtype: int64 """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) if isinstance(key, list): warnings.warn( "Passing lists as key for xs is deprecated and will be removed in a " "future version. Pass key as a tuple instead.", FutureWarning, stacklevel=find_stack_level(), ) if level is not None: if not isinstance(labels, MultiIndex): raise TypeError("Index must be a MultiIndex") loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level) # create the tuple of the indexer _indexer = [slice(None)] * self.ndim _indexer[axis] = loc indexer = tuple(_indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if axis == 1: if drop_level: return self[key] index = self.columns else: index = self.index self._consolidate_inplace() if isinstance(index, MultiIndex): loc, new_index = index._get_loc_level(key, level=0) if not drop_level: if lib.is_integer(loc): new_index = index[loc : loc + 1] else: new_index = index[loc] else: loc = index.get_loc(key) if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: (inds,) = loc.nonzero() return self._take_with_is_copy(inds, axis=axis) else: return self._take_with_is_copy(loc, axis=axis) if not is_scalar(loc): new_index = index[loc] if is_scalar(loc) and axis == 0: # In this case loc should be an integer if self.ndim == 1: # if we encounter an array-like and we only have 1 dim # that means that their are list/ndarrays inside the Series! # so just return them (GH 6394) return self._values[loc] new_values = self._mgr.fast_xs(loc) result = self._constructor_sliced( new_values, index=self.columns, name=self.index[loc], dtype=new_values.dtype, ) elif is_scalar(loc): result = self.iloc[:, slice(loc, loc + 1)] elif axis == 1: result = self.iloc[:, loc] else: result = self.iloc[loc] result.index = new_index # this could be a view # but only in a single-dtyped view sliceable case result._set_is_copy(self, copy=not result._is_view) return result def __getitem__(self, item): raise AbstractMethodError(self) def _slice(self: NDFrameT, slobj: slice, axis=0) -> NDFrameT: """ Construct a slice of this container. Slicing with this method is *always* positional. 
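        A hypothetical illustration (``_slice`` is internal; it is what
        label-free row slicing like ``df[1:3]`` ultimately routes to):

        >>> df = pd.DataFrame({"a": range(4)}, index=[9, 8, 7, 6])
        >>> df[1:3]  # positional, despite the integer index
           a
        8  1
        7  2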
""" assert isinstance(slobj, slice), type(slobj) axis = self._get_block_manager_axis(axis) result = self._constructor(self._mgr.get_slice(slobj, axis=axis)) result = result.__finalize__(self) # this could be a view # but only in a single-dtyped view sliceable case is_copy = axis != 0 or result._is_view result._set_is_copy(self, copy=is_copy) return result @final def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None: if not copy: self._is_copy = None else: assert ref is not None self._is_copy = weakref.ref(ref) def _check_is_chained_assignment_possible(self) -> bool_t: """ Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just near setting a value Will return a boolean if it we are a view and are cached, but a single-dtype meaning that the cacher should be updated following setting. """ if self._is_copy: self._check_setitem_copy(t="referent") return False @final def _check_setitem_copy(self, t="setting", force=False): """ Parameters ---------- t : str, the type of setting error force : bool, default False If True, then force showing an error. validate if we are doing a setitem on a chained copy. It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while other are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are view # (which is not generally guaranteed but is usually True. However, # this is in general not a good practice and we recommend using .loc. df.iloc[0:5]['group'] = 'a' """ # return early if the check is not needed if not (force or self._is_copy): return value = config.get_option("mode.chained_assignment") if value is None: return # see if the copy is not actually referred; if so, then dissolve # the copy weakref if self._is_copy is not None and not isinstance(self._is_copy, str): r = self._is_copy() if not gc.get_referents(r) or (r is not None and r.shape == self.shape): self._is_copy = None return # a custom message if isinstance(self._is_copy, str): t = self._is_copy elif t == "referent": t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" "See the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) else: t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value " "instead\n\nSee the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) if value == "raise": raise com.SettingWithCopyError(t) elif value == "warn": warnings.warn(t, com.SettingWithCopyWarning, stacklevel=find_stack_level()) def __delitem__(self, key) -> None: """ Delete item """ deleted = False maybe_shortcut = False if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: # By using engine's __contains__ we effectively # restrict to same-length tuples maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: # Allow shorthand to delete all columns whose first len(key) # elements match key: if not isinstance(key, tuple): key = (key,) for col in self.columns: if isinstance(col, tuple) and col[: len(key)] == key: del self[col] deleted = True 
if not deleted: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) # delete from the caches try: del self._item_cache[key] except KeyError: pass # ---------------------------------------------------------------------- # Unsorted @final def _check_inplace_and_allows_duplicate_labels(self, inplace): if inplace and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'inplace=True' when " "'self.flags.allows_duplicate_labels' is False." ) @final def get(self, key, default=None): """ Get item from object for given key (ex: DataFrame column). Returns default value if not found. Parameters ---------- key : object Returns ------- value : same type as items contained in object Examples -------- >>> df = pd.DataFrame( ... [ ... [24.3, 75.7, "high"], ... [31, 87.8, "high"], ... [22, 71.6, "medium"], ... [35, 95, "medium"], ... ], ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"], ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"), ... ) >>> df temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df.get(["temp_celsius", "windspeed"]) temp_celsius windspeed 2014-02-12 24.3 high 2014-02-13 31.0 high 2014-02-14 22.0 medium 2014-02-15 35.0 medium If the key isn't found, the default value will be used. >>> df.get(["temp_celsius", "temp_kelvin"], default="default_value") 'default_value' """ try: return self[key] except (KeyError, ValueError, IndexError): return default @final @property def _is_view(self) -> bool_t: """Return boolean indicating if self is view of another array""" return self._mgr.is_view @final def reindex_like( self: NDFrameT, other, method: str | None = None, copy: bool_t = True, limit=None, tolerance=None, ) -> NDFrameT: """ Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. 
Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... '2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium """ d = other._construct_axes_dict( axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance, ) return self.reindex(**d) def drop( self, labels=None, axis=0, index=None, columns=None, level=None, inplace: bool_t = False, errors: str = "raise", ): inplace = validate_bool_kwarg(inplace, "inplace") if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes, _ = self._construct_axes_from_arguments((index, columns), {}) else: raise ValueError( "Need to specify at least one of 'labels', 'index' or 'columns'" ) obj = self for axis, labels in axes.items(): if labels is not None: obj = obj._drop_axis(labels, axis, level=level, errors=errors) if inplace: self._update_inplace(obj) else: return obj @final def _drop_axis( self: NDFrameT, labels, axis, level=None, errors: str = "raise", only_slice: bool_t = False, ) -> NDFrameT: """ Drop labels from specified axis. Used in the ``drop`` method internally. Parameters ---------- labels : single label or list-like axis : int or axis name level : int or level name, default None For MultiIndex errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. only_slice : bool, default False Whether indexing along columns should be view-only. 
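        A hedged sketch of the ``errors`` behaviour this implements, shown via
        the public ``drop`` (``errors="ignore"`` drops only the labels that
        are present):

        >>> df = pd.DataFrame({"a": [1], "b": [2]})
        >>> df.drop(columns=["b", "missing"], errors="ignore")
           a
        0  1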
""" axis_num = self._get_axis_number(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) indexer = axis.get_indexer(new_axis) # Case for non-unique axis else: is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) labels = ensure_object(com.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") mask = ~axis.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == "raise" and mask.all(): raise KeyError(f"{labels} not found in axis") elif ( isinstance(axis, MultiIndex) and labels.dtype == "object" and not is_tuple_labels ): # Set level to zero in case of MultiIndex and label is string, # because isin can't handle strings for MultiIndexes GH#36293 # In case of tuples we get dtype object but have to use isin GH#42771 mask = ~axis.get_level_values(0).isin(labels) else: mask = ~axis.isin(labels) # Check if label doesn't exist along axis labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == "raise" and labels_missing: raise KeyError(f"{labels} not found in axis") if is_extension_array_dtype(mask.dtype): # GH#45860 mask = mask.to_numpy(dtype=bool) indexer = mask.nonzero()[0] new_axis = axis.take(indexer) bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=bm_axis, allow_dups=True, only_slice=only_slice, ) result = self._constructor(new_mgr) if self.ndim == 1: result.name = self.name return result.__finalize__(self) @final def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: """ Replace self internals with result. Parameters ---------- result : same type as self verify_is_copy : bool, default True Provide is_copy checks. """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() self._clear_item_cache() self._mgr = result._mgr self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) @final def add_prefix(self: NDFrameT, prefix: str) -> NDFrameT: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = functools.partial("{prefix}{}".format, prefix=prefix) mapper = {self._info_axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" return self._rename(**mapper) # type: ignore[return-value, arg-type] @final def add_suffix(self: NDFrameT, suffix: str) -> NDFrameT: """ Suffix labels with string `suffix`. 
For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = functools.partial("{}{suffix}".format, suffix=suffix) mapper = {self._info_axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" return self._rename(**mapper) # type: ignore[return-value, arg-type] def sort_values( self, axis=0, ascending=True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool_t = False, key: ValueKeyFunc = None, ): """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` and `stable` are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 1.0.0 key : callable, optional Apply the key function to the values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return a Series with the same shape as the input. It will be applied to each column in `by` independently. .. versionadded:: 1.1.0 Returns ------- DataFrame or None DataFrame with sorted values or None if ``inplace=True``. See Also -------- DataFrame.sort_index : Sort a DataFrame by the index. Series.sort_values : Similar method for a Series. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] ... 
}) >>> df col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 col4 1 A 1 1 B 0 A 2 0 a 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 col4 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B 3 NaN 8 4 D Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 col4 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B Sorting with a key function >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Natural sort with the key argument, using the `natsort <https://github.com/SethMMorton/natsort>` package. >>> df = pd.DataFrame({ ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], ... "value": [10, 20, 30, 40, 50] ... }) >>> df time value 0 0hr 10 1 128hr 20 2 72hr 30 3 48hr 40 4 96hr 50 >>> from natsort import index_natsorted >>> df.sort_values( ... by="time", ... key=lambda x: np.argsort(index_natsorted(df["time"])) ... ) time value 0 0hr 10 3 48hr 40 2 72hr 30 4 96hr 50 1 128hr 20 """ raise AbstractMethodError(self) def sort_index( self, axis=0, level=None, ascending: bool_t | int | Sequence[bool_t | int] = True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", sort_remaining: bool_t = True, ignore_index: bool_t = False, key: IndexKeyFunc = None, ): inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) ascending = validate_ascending(ascending) target = self._get_axis(axis) indexer = get_indexer_indexer( target, level, ascending, kind, na_position, sort_remaining, key ) if indexer is None: if inplace: result = self else: result = self.copy() if ignore_index: result.index = default_index(len(self)) if inplace: return else: return result baxis = self._get_block_manager_axis(axis) new_data = self._mgr.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic()) if ignore_index: axis = 1 if isinstance(self, ABCDataFrame) else 0 new_data.set_axis(axis, default_index(len(indexer))) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="sort_index") @doc( klass=_shared_doc_kwargs["klass"], axes=_shared_doc_kwargs["axes"], optional_labels="", optional_axis="", ) def reindex(self: NDFrameT, *args, **kwargs) -> NDFrameT: """ Conform {klass} to new index with optional filling logic. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- {optional_labels} {axes} : array-like, optional New labels / index to conform to, should be specified using keywords. Preferably an Index object to avoid duplicating data. {optional_axis} method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: Propagate last valid observation forward to next valid. * backfill / bfill: Use next valid observation to fill gap. 
* nearest: Use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. limit : int, default None Maximum number of consecutive elements to forward or backward fill. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations most satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- {klass} with changed index. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={{'index', 'columns'}}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. >>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, ... 
index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. >>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more. """ # TODO: Decide if we care about having different examples for different # kinds # construct the args axes, kwargs = self._construct_axes_from_arguments(args, kwargs) method = missing.clean_reindex_fill_method(kwargs.pop("method", None)) level = kwargs.pop("level", None) copy = kwargs.pop("copy", True) limit = kwargs.pop("limit", None) tolerance = kwargs.pop("tolerance", None) fill_value = kwargs.pop("fill_value", None) # Series.reindex doesn't use / need the axis kwarg # We pop and ignore it here, to make writing Series/Frame generic code # easier kwargs.pop("axis", None) if kwargs: raise TypeError( "reindex() got an unexpected keyword " f'argument "{list(kwargs.keys())[0]}"' ) self._consolidate_inplace() # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if all( self._get_axis(axis).identical(ax) for axis, ax in axes.items() if ax is not None ): if copy: return self.copy() return self # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): return self._reindex_multi(axes, copy, fill_value) # perform the reindex on the axes return self._reindex_axes( axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self, method="reindex") def _reindex_axes( self: NDFrameT, axes, level, limit, tolerance, method, fill_value, copy ) -> NDFrameT: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) new_index, indexer = ax.reindex( labels, level=level, limit=limit, tolerance=tolerance, method=method ) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False, ) # If we've made a copy once, no need to make another one copy = False return obj def _needs_reindex_multi(self, axes, method, level) -> bool_t: """Check if we do need a multi reindex.""" return ( (com.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is 
None and not self._is_mixed_type ) def _reindex_multi(self, axes, copy, fill_value): raise AbstractMethodError(self) @final def _reindex_with_indexers( self: NDFrameT, reindexers, fill_value=None, copy: bool_t = False, allow_dups: bool_t = False, ) -> NDFrameT: """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated new_data = self._mgr for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_platform_int(indexer) # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi) new_data = new_data.reindex_indexer( index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy, ) # If we've made a copy once, no need to make another one copy = False if copy and new_data is self._mgr: new_data = new_data.copy() return self._constructor(new_data).__finalize__(self) def filter( self: NDFrameT, items=None, like: str | None = None, regex: str | None = None, axis=None, ) -> NDFrameT: """ Subset the dataframe rows or columns according to the specified index labels. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : str Keep labels from axis for which "like in label == True". regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : {0 or ‘index’, 1 or ‘columns’, None}, default None The axis to filter on, expressed either as an index (int) or axis name (str). By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ nkw = com.count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) return self.reindex(**{name: [r for r in items if r in labels]}) elif like: def f(x) -> bool_t: assert like is not None # needed for mypy return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x) -> bool_t: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError("Must pass either `items`, `like`, or `regex`") @final def head(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. 
It is useful for quickly testing if your object has the right type of data in it. For negative values of `n`, this function returns all rows except the last `n` rows, equivalent to ``df[:-n]``. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- same type as caller The first `n` rows of the caller object. See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon For negative values of `n` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot """ return self.iloc[:n] @final def tail(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `n` rows, equivalent to ``df[n:]``. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if n == 0: return self.iloc[0:0] return self.iloc[-n:] @final def sample( self: NDFrameT, n: int | None = None, frac: float | None = None, replace: bool_t = False, weights=None, random_state: RandomState | None = None, axis: Axis | None = None, ignore_index: bool_t = False, ) -> NDFrameT: """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional If int, array-like, or BitGenerator, seed for random number generator. 
If np.random.RandomState or np.random.Generator, use as given. .. versionchanged:: 1.1.0 array-like and BitGenerator object now passed to np.random.RandomState() as seed .. versionchanged:: 1.4.0 np.random.Generator objects now accepted axis : {0 or ‘index’, 1 or ‘columns’, None}, default None Axis to sample. Accepts axis number or name. Default is stat axis for given data type (0 for Series and DataFrames). ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.3.0 Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- DataFrameGroupBy.sample: Generates random samples from each group of a DataFrame object. SeriesGroupBy.sample: Generates random samples from each group of a Series object. numpy.random.choice: Generates a random sample from a given 1-D numpy array. Notes ----- If `frac` > 1, `replacement` should be set to `True`. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 An upsample sample of the ``DataFrame`` with replacement: Note that `replace` parameter has to be `True` for `frac` parameter > 1. >>> df.sample(frac=2, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 falcon 2 2 10 falcon 2 2 10 fish 0 0 8 dog 4 0 2 fish 0 0 8 dog 4 0 2 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ # noqa:E501 if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) obj_len = self.shape[axis] # Process random_state argument rs = com.random_state(random_state) size = sample.process_sampling_size(n, frac, replace) if size is None: assert frac is not None size = round(frac * obj_len) if weights is not None: weights = sample.preprocess_weights(self, weights, axis) sampled_indices = sample.sample(obj_len, size, replace, weights, rs) result = self.take(sampled_indices, axis=axis) if ignore_index: result.index = default_index(len(result)) return result @final @doc(klass=_shared_doc_kwargs["klass"]) def pipe( self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs, ) -> T: r""" Apply chainable functions that expect Series or DataFrames. Parameters ---------- func : function Function to apply to the {klass}. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the {klass}. args : iterable, optional Positional arguments passed into ``func``. kwargs : mapping, optional A dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``. 
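        A concrete runnable sketch (``add_n`` is a hypothetical helper, not
        part of pandas):

        >>> def add_n(df, n):
        ...     return df + n
        >>> pd.DataFrame({"a": [1]}).pipe(add_n, n=2)
           a
        0  3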
See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. Instead of writing >>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP You can write >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe(func, arg2=b, arg3=c) ... ) # doctest: +SKIP If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``arg2``: >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... ) # doctest: +SKIP """ return com.pipe(self, func, *args, **kwargs) # ---------------------------------------------------------------------- # Attribute access @final def __finalize__( self: NDFrameT, other, method: str | None = None, **kwargs ) -> NDFrameT: """ Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where ``__finalize__`` was called. .. warning:: The value passed as `method` are not currently considered stable across pandas releases. """ if isinstance(other, NDFrame): for name in other.attrs: self.attrs[name] = other.attrs[name] self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": attrs = other.objs[0].attrs check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) if check_attrs: for name in attrs: self.attrs[name] = attrs[name] allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs ) self.flags.allows_duplicate_labels = allows_duplicate_labels return self def __getattr__(self, name: str): """ After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). if ( name not in self._internal_names_set and name not in self._metadata and name not in self._accessors and self._info_axis._can_hold_identifiers_and_holds_name(name) ): return self[name] return object.__getattribute__(self, name) def __setattr__(self, name: str, value) -> None: """ After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass # if this fails, go on to more involved attribute setting # (note that this matches __getattr__, above). 
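        # Hedged illustration of the DataFrame branch below: assigning a
        # list-like to a brand-new attribute, e.g. ``df.new_col = [1, 2]``,
        # sets a plain attribute (no column is created) and emits the
        # warning defined below, pointing users at ``df["new_col"] = [1, 2]``
        # instead.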
if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif name in self._info_axis: self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if isinstance(self, ABCDataFrame) and (is_list_like(value)): warnings.warn( "Pandas doesn't allow columns to be " "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", stacklevel=find_stack_level(), ) object.__setattr__(self, name, value) @final def _dir_additions(self) -> set[str]: """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first level values are used. """ additions = super()._dir_additions() if self._info_axis._can_hold_strings: additions.update(self._info_axis._dir_additions_for_owner) return additions # ---------------------------------------------------------------------- # Consolidation of internals @final def _protect_consolidate(self, f): """ Consolidate _mgr -- if the blocks have changed, then clear the cache """ if isinstance(self._mgr, (ArrayManager, SingleArrayManager)): return f() blocks_before = len(self._mgr.blocks) result = f() if len(self._mgr.blocks) != blocks_before: self._clear_item_cache() return result @final def _consolidate_inplace(self) -> None: """Consolidate data in place and return None""" def f(): self._mgr = self._mgr.consolidate() self._protect_consolidate(f) @final def _consolidate(self): """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Returns ------- consolidated : same type as caller """ f = lambda: self._mgr.consolidate() cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self) @final @property def _is_mixed_type(self) -> bool_t: if self._mgr.is_single_block: return False if self._mgr.any_extension_types: # Even if they have the same dtype, we can't consolidate them, # so we pretend this is "mixed'" return True return self.dtypes.nunique() > 1 @final def _check_inplace_setting(self, value) -> bool_t: """check whether we allow in-place setting with this type of value""" if self._is_mixed_type and not self._mgr.is_numeric_mixed_type: # allow an actual np.nan thru if is_float(value) and np.isnan(value): return True raise TypeError( "Cannot do inplace boolean setting on " "mixed-types with a non np.nan value" ) return True @final def _get_numeric_data(self): return self._constructor(self._mgr.get_numeric_data()).__finalize__(self) @final def _get_bool_data(self): return self._constructor(self._mgr.get_bool_data()).__finalize__(self) # ---------------------------------------------------------------------- # Internal Interface Methods @property def values(self) -> np.ndarray: raise AbstractMethodError(self) @property def _values(self) -> np.ndarray: """internal implementation""" raise AbstractMethodError(self) @property def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 
'datetime': [pd.Timestamp('20180310')], ... 'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ data = self._mgr.get_dtypes() return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) def astype( self: NDFrameT, dtype, copy: bool_t = True, errors: str = "raise" ) -> NDFrameT: """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Notes ----- .. deprecated:: 1.3.0 Using ``astype`` to convert from timezone-naive dtype to timezone-aware dtype is deprecated and will raise in a future version. Use :meth:`Series.dt.tz_localize` instead. Examples -------- Create a DataFrame: >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df.dtypes col1 int64 col2 int64 dtype: object Cast all columns to int32: >>> df.astype('int32').dtypes col1 int32 col2 int32 dtype: object Cast col1 to int32 using a dictionary: >>> df.astype({'col1': 'int32'}).dtypes col1 int32 col2 int64 dtype: object Create a series: >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int64): [1, 2] Convert to ordered categorical type with custom ordering: >>> from pandas.api.types import CategoricalDtype >>> cat_dtype = CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Note that using ``copy=False`` and changing data on a new pandas object may propagate changes: >>> s1 = pd.Series([1, 2]) >>> s2 = s1.astype('int64', copy=False) >>> s2[0] = 10 >>> s1 # note that s1[0] has changed too 0 10 1 2 dtype: int64 Create a series of dates: >>> ser_date = pd.Series(pd.date_range('20200101', periods=3)) >>> ser_date 0 2020-01-01 1 2020-01-02 2 2020-01-03 dtype: datetime64[ns] """ if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError( "Only the Series name can be used for " "the key in Series dtype mappings." ) new_type = dtype[self.name] return self.astype(new_type, copy, errors) # GH#44417 cast to Series so we can use .iat below, which will be # robust in case we from pandas import Series dtype_ser = Series(dtype, dtype=object) for col_name in dtype_ser.index: if col_name not in self: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument. " f"'{col_name}' not found in columns." 
) dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) results = [] for i, (col_name, col) in enumerate(self.items()): cdt = dtype_ser.iat[i] if isna(cdt): res_col = col.copy() if copy else col else: res_col = col.astype(dtype=cdt, copy=copy, errors=errors) results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names # TODO(EA2D): special case not needed with 2D EAs results = [ self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns)) ] else: # else, only a single dtype is given new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self, method="astype") # GH 33113: handle empty frame or series if not results: return self.copy() # GH 19920: retain column metadata after concat result = concat(results, axis=1, copy=False) # GH#40810 retain subclass # error: Incompatible types in assignment # (expression has type "NDFrameT", variable has type "DataFrame") result = self._constructor(result) # type: ignore[assignment] result.columns = self.columns result = result.__finalize__(self, method="astype") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) @final def copy(self: NDFrameT, deep: bool_t = True) -> NDFrameT: """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- copy : Series or DataFrame Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Since pandas is not thread safe, see the :ref:`gotchas <gotchas.thread-safety>` when copying in a threading environment. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. 
>>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._mgr.copy(deep=deep) self._clear_item_cache() return self._constructor(data).__finalize__(self, method="copy") @final def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT: return self.copy(deep=deep) @final def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) @final def _convert( self: NDFrameT, datetime: bool_t = False, numeric: bool_t = False, timedelta: bool_t = False, ) -> NDFrameT: """ Attempt to infer better dtype for object columns. Parameters ---------- datetime : bool, default False If True, convert to date where possible. numeric : bool, default False If True, attempt to convert to numbers (including strings), with unconvertible values becoming NaN. timedelta : bool, default False If True, convert to timedelta where possible. Returns ------- converted : same as input object """ validate_bool_kwarg(datetime, "datetime") validate_bool_kwarg(numeric, "numeric") validate_bool_kwarg(timedelta, "timedelta") return self._constructor( self._mgr.convert( datetime=datetime, numeric=numeric, timedelta=timedelta, copy=True, ) ).__finalize__(self) @final def infer_objects(self: NDFrameT) -> NDFrameT: """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. Returns ------- converted : same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. convert_dtypes : Convert argument to best possible dtype. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ # numeric=False necessary to only soft convert; # python objects will still be converted to # native numpy numeric types return self._constructor( self._mgr.convert(datetime=True, numeric=False, timedelta=True, copy=True) ).__finalize__(self, method="infer_objects") @final def convert_dtypes( self: NDFrameT, infer_objects: bool_t = True, convert_string: bool_t = True, convert_integer: bool_t = True, convert_boolean: bool_t = True, convert_floating: bool_t = True, ) -> NDFrameT: """ Convert columns to best possible dtypes using dtypes supporting ``pd.NA``. .. versionadded:: 1.0.0 Parameters ---------- infer_objects : bool, default True Whether object dtypes should be converted to the best possible types. convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True Whether, if possible, conversion can be done to integer extension types. convert_boolean : bool, defaults True Whether object dtypes should be converted to ``BooleanDtypes()``. 
        convert_floating : bool, default True
            Whether, if possible, conversion can be done to floating extension
            types. If `convert_integer` is also True, preference will be given
            to integer dtypes if the floats can be faithfully cast to integers.

            .. versionadded:: 1.2.0

        Returns
        -------
        Series or DataFrame
            Copy of input object with new dtype.

        See Also
        --------
        infer_objects : Infer dtypes of objects.
        to_datetime : Convert argument to datetime.
        to_timedelta : Convert argument to timedelta.
        to_numeric : Convert argument to a numeric type.

        Notes
        -----
        By default, ``convert_dtypes`` will attempt to convert a Series (or each
        Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
        ``convert_string``, ``convert_integer``, ``convert_boolean`` and
        ``convert_floating``, it is possible to turn off individual conversions
        to ``StringDtype``, the integer extension types, ``BooleanDtype`` or
        floating extension types, respectively.

        For object-dtyped columns, if ``infer_objects`` is ``True``, use the
        inference rules as during normal Series/DataFrame construction. Then, if
        possible, convert to ``StringDtype``, ``BooleanDtype`` or an appropriate
        integer or floating extension type, otherwise leave as ``object``.

        If the dtype is integer, convert to an appropriate integer extension type.

        If the dtype is numeric, and consists of all integers, convert to an
        appropriate integer extension type. Otherwise, convert to an
        appropriate floating extension type.

        .. versionchanged:: 1.2
            Starting with pandas 1.2, this method also converts float columns
            to the nullable floating extension type.

        In the future, as new dtypes are added that support ``pd.NA``, the results
        of this method will change to support those new dtypes.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...     {
        ...         "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
        ...         "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
        ...         "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
        ...         "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
        ...         "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
        ...         "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
        ...     }
        ... )

        Start with a DataFrame with default dtypes.

        >>> df
           a  b      c    d     e      f
        0  1  x   True    h  10.0    NaN
        1  2  y  False    i   NaN  100.5
        2  3  z    NaN  NaN  20.0  200.0

        >>> df.dtypes
        a      int32
        b     object
        c     object
        d     object
        e    float64
        f    float64
        dtype: object

        Convert the DataFrame to use best possible dtypes.

        >>> dfn = df.convert_dtypes()
        >>> dfn
           a  b      c     d     e      f
        0  1  x   True     h    10   <NA>
        1  2  y  False     i  <NA>  100.5
        2  3  z   <NA>  <NA>    20  200.0

        >>> dfn.dtypes
        a      Int32
        b     string
        c    boolean
        d     string
        e      Int64
        f    Float64
        dtype: object

        Start with a Series of strings and missing data represented by ``np.nan``.

        >>> s = pd.Series(["a", "b", np.nan])
        >>> s
        0      a
        1      b
        2    NaN
        dtype: object

        Obtain a Series with dtype ``StringDtype``.
>>> s.convert_dtypes() 0 a 1 b 2 <NA> dtype: string """ if self.ndim == 1: return self._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, ) else: results = [ col._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, ) for col_name, col in self.items() ] if len(results) > 0: result = concat(results, axis=1, copy=False, keys=self.columns) cons = cast(Type["DataFrame"], self._constructor) result = cons(result) result = result.__finalize__(self, method="convert_dtypes") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) else: return self.copy() # ---------------------------------------------------------------------- # Filling NA's @doc(**_shared_doc_kwargs) def fillna( self: NDFrameT, value=None, method=None, axis=None, inplace: bool_t = False, limit=None, downcast=None, ) -> NDFrameT | None: """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use next valid observation to fill gap. axis : {axes_single_arg} Axis along which to fill missing values. inplace : bool, default False If True, fill in-place. Note: this will modify any other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. See Also -------- interpolate : Fill NaN values using interpolation. reindex : Conform object to new index. asfreq : Convert TimeSeries to specified frequency. Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, np.nan], ... [np.nan, 3, np.nan, 4]], ... columns=list("ABCD")) >>> df A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 NaN NaN NaN NaN 3 NaN 3.0 NaN 4.0 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 0.0 3 0.0 3.0 0.0 4.0 We can also propagate non-null values forward or backward. >>> df.fillna(method="ffill") A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 3.0 4.0 NaN 1.0 3 3.0 3.0 NaN 4.0 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 2.0 1.0 2 0.0 1.0 2.0 3.0 3 0.0 3.0 2.0 4.0 Only replace the first NaN element. 
>>> df.fillna(value=values, limit=1) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 NaN 1.0 2 NaN 1.0 NaN 3.0 3 NaN 3.0 NaN 4.0 When filling using a DataFrame, replacement happens along the same column names and same indices >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE")) >>> df.fillna(df2) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 NaN 3 0.0 3.0 0.0 4.0 Note that column D is not affected since it is not present in df2. """ inplace = validate_bool_kwarg(inplace, "inplace") value, method = validate_fillna_kwargs(value, method) self._consolidate_inplace() # set the default here, so functions examining the signaure # can detect if something was set (e.g. in groupby) (GH9221) if axis is None: axis = 0 axis = self._get_axis_number(axis) if value is None: if not self._mgr.is_single_block and axis == 1: if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T return result new_data = self._mgr.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, downcast=downcast, ) else: if self.ndim == 1: if isinstance(value, (dict, ABCSeries)): if not len(value): # test_fillna_nonscalar if inplace: return None return self.copy() value = create_series_with_explicit_dtype( value, dtype_if_empty=object ) value = value.reindex(self.index, copy=False) value = value._values elif not is_list_like(value): pass else: raise TypeError( '"value" parameter must be a scalar, dict ' "or Series, but you passed a " f'"{type(value).__name__}"' ) new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, (dict, ABCSeries)): if axis == 1: raise NotImplementedError( "Currently only can fill " "with dict/Series column " "by column" ) result = self if inplace else self.copy() is_dict = isinstance(downcast, dict) for k, v in value.items(): if k not in result: continue downcast_k = downcast if not is_dict else downcast.get(k) result[k] = result[k].fillna(v, limit=limit, downcast=downcast_k) return result if not inplace else None elif not is_list_like(value): if not self._mgr.is_single_block and axis == 1: result = self.T.fillna(value=value, limit=limit).T new_data = result else: new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, ABCDataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value)._mgr else: raise ValueError(f"invalid fill value with a {type(value)}") result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="fillna") @doc(klass=_shared_doc_kwargs["klass"]) def ffill( self: NDFrameT, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast=None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) pad = ffill @doc(klass=_shared_doc_kwargs["klass"]) def bfill( self: NDFrameT, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast=None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. 
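
        Examples
        --------
        A small sketch of backward filling on a Series (``None`` is upcast to
        ``NaN`` in a float64 Series by default):

        >>> s = pd.Series([None, 2.0, None, 4.0])
        >>> s.bfill()
        0    2.0
        1    2.0
        2    4.0
        3    4.0
        dtype: float64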
""" return self.fillna( method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) backfill = bfill @doc( _shared_docs["replace"], klass=_shared_doc_kwargs["klass"], inplace=_shared_doc_kwargs["inplace"], replace_iloc=_shared_doc_kwargs["replace_iloc"], ) def replace( self, to_replace=None, value=lib.no_default, inplace: bool_t = False, limit: int | None = None, regex=False, method=lib.no_default, ): if not ( is_scalar(to_replace) or is_re_compilable(to_replace) or is_list_like(to_replace) ): raise TypeError( "Expecting 'to_replace' to be either a scalar, array-like, " "dict or None, got invalid type " f"{repr(type(to_replace).__name__)}" ) inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") self._consolidate_inplace() if value is lib.no_default or method is not lib.no_default: # GH#36984 if the user explicitly passes value=None we want to # respect that. We have the corner case where the user explicitly # passes value=None *and* a method, which we interpret as meaning # they want the (documented) default behavior. if method is lib.no_default: # TODO: get this to show up as the default in the docs? method = "pad" # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): if isinstance(self, ABCDataFrame): from pandas import Series result = self.apply( Series._replace_single, args=(to_replace, method, inplace, limit), ) if inplace: return return result self = cast("Series", self) return self._replace_single(to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return return self.copy() if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = { col: (to_rep, value) for col, to_rep in to_replace.items() } return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): if not is_list_like(value): # e.g. 
to_replace = [NA, ''] and value is 0, # so we replace NA with 0 and then replace '' with 0 value = [value] * len(to_replace) # e.g. we have to_replace = [NA, ''] and value = [0, 'missing'] if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. " f"Expecting {len(to_replace)} got {len(value)} " ) new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace( regex, value, inplace=inplace, limit=limit, regex=True ) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and " "non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 regex = should_use_regex(regex, to_replace) if regex: new_data = self._mgr.replace_regex( to_replace=to_replace, value=value, inplace=inplace, ) else: new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace") def interpolate( self: NDFrameT, method: str = "linear", axis: Axis = 0, limit: int | None = None, inplace: bool_t = False, limit_direction: str | None = None, limit_area: str | None = None, downcast: str | None = None, **kwargs, ) -> NDFrameT | None: """ Fill NaN values using an interpolation method. Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`. These methods use the numerical values of the index. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. If limit is specified: * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. 
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be 'backwards'. If 'limit' is not specified: * If 'method' is 'backfill' or 'bfill', the default is 'backward' * else the default is 'forward' .. versionchanged:: 1.1.0 raises ValueError if `limit_direction` is 'forward' or 'both' and method is 'backfill' or 'bfill'. raises ValueError if `limit_direction` is 'backward' or 'both' and method is 'pad' or 'ffill'. limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. * ``None``: No fill restriction. * 'inside': Only fill NaNs surrounded by valid values (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function. Returns ------- Series or DataFrame or None Returns the same object type as the caller, interpolated at some or all ``NaN`` values or None if ``inplace=True``. See Also -------- fillna : Fill missing values using different methods. scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials (Akima interpolator). scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the Bernstein basis. scipy.interpolate.interp1d : Interpolate a 1-D function. scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh interpolator). scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic interpolation. scipy.interpolate.CubicSpline : Cubic spline data interpolator. Notes ----- The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima' methods are wrappers around the respective SciPy implementations of similar names. These use the actual numerical values of the index. For more information on their behavior, see the `SciPy documentation <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__. Examples -------- Filling in ``NaN`` in a :class:`~pandas.Series` via linear interpolation. >>> s = pd.Series([0, 1, np.nan, 3]) >>> s 0 0.0 1 1.0 2 NaN 3 3.0 dtype: float64 >>> s.interpolate() 0 0.0 1 1.0 2 2.0 3 3.0 dtype: float64 Filling in ``NaN`` in a Series by padding, but filling at most two consecutive ``NaN`` at a time. >>> s = pd.Series([np.nan, "single_one", np.nan, ... "fill_two_more", np.nan, np.nan, np.nan, ... 4.71, np.nan]) >>> s 0 NaN 1 single_one 2 NaN 3 fill_two_more 4 NaN 5 NaN 6 NaN 7 4.71 8 NaN dtype: object >>> s.interpolate(method='pad', limit=2) 0 NaN 1 single_one 2 single_one 3 fill_two_more 4 fill_two_more 5 fill_two_more 6 NaN 7 4.71 8 4.71 dtype: object Filling in ``NaN`` in a Series via polynomial interpolation or splines: Both 'polynomial' and 'spline' methods require that you also specify an ``order`` (int). >>> s = pd.Series([0, 2, np.nan, 8]) >>> s.interpolate(method='polynomial', order=2) 0 0.000000 1 2.000000 2 4.666667 3 8.000000 dtype: float64 Fill the DataFrame forward (that is, going down) along each column using linear interpolation. Note how the last entry in column 'a' is interpolated differently, because there is no entry after it to use for interpolation. Note how the first entry in column 'b' remains ``NaN``, because there is no entry before it to use for interpolation. >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), ... (np.nan, 2.0, np.nan, np.nan), ... (2.0, 3.0, np.nan, 9.0), ... (np.nan, 4.0, -4.0, 16.0)], ... 
columns=list('abcd'))
        >>> df
             a    b    c     d
        0  0.0  NaN -1.0   1.0
        1  NaN  2.0  NaN   NaN
        2  2.0  3.0  NaN   9.0
        3  NaN  4.0 -4.0  16.0
        >>> df.interpolate(method='linear', limit_direction='forward', axis=0)
             a    b    c     d
        0  0.0  NaN -1.0   1.0
        1  1.0  2.0 -2.0   5.0
        2  2.0  3.0 -3.0   9.0
        3  2.0  4.0 -4.0  16.0

        Using polynomial interpolation.

        >>> df['d'].interpolate(method='polynomial', order=2)
        0     1.0
        1     4.0
        2     9.0
        3    16.0
        Name: d, dtype: float64
        """
        inplace = validate_bool_kwarg(inplace, "inplace")

        axis = self._get_axis_number(axis)

        fillna_methods = ["ffill", "bfill", "pad", "backfill"]
        should_transpose = axis == 1 and method not in fillna_methods

        obj = self.T if should_transpose else self

        if obj.empty:
            return self.copy()

        if method not in fillna_methods:
            axis = self._info_axis_number

        if isinstance(obj.index, MultiIndex) and method != "linear":
            raise ValueError(
                "Only `method=linear` interpolation is supported on MultiIndexes."
            )

        # Set `limit_direction` depending on `method`
        if limit_direction is None:
            limit_direction = (
                "backward" if method in ("backfill", "bfill") else "forward"
            )
        else:
            if method in ("pad", "ffill") and limit_direction != "forward":
                raise ValueError(
                    f"`limit_direction` must be 'forward' for method `{method}`"
                )
            if method in ("backfill", "bfill") and limit_direction != "backward":
                raise ValueError(
                    f"`limit_direction` must be 'backward' for method `{method}`"
                )

        if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")):
            raise TypeError(
                "Cannot interpolate with all object-dtype columns "
                "in the DataFrame. Try setting at least one "
                "column to a numeric dtype."
            )

        # create/use the index
        if method == "linear":
            # prior default
            index = Index(np.arange(len(obj.index)))
        else:
            index = obj.index

        methods = {"index", "values", "nearest", "time"}
        is_numeric_or_datetime = (
            is_numeric_dtype(index.dtype)
            or is_datetime64_any_dtype(index.dtype)
            or is_timedelta64_dtype(index.dtype)
        )
        if method not in methods and not is_numeric_or_datetime:
            raise ValueError(
                "Index column must be numeric or datetime type when "
                f"using {method} method other than linear. "
                "Try setting a numeric or datetime index column before "
                "interpolating."
            )

        if isna(index).any():
            raise NotImplementedError(
                "Interpolation with NaNs in the index "
                "has not been implemented. Try filling "
                "those NaNs before interpolating."
            )
        new_data = obj._mgr.interpolate(
            method=method,
            axis=axis,
            index=index,
            limit=limit,
            limit_direction=limit_direction,
            limit_area=limit_area,
            inplace=inplace,
            downcast=downcast,
            **kwargs,
        )

        result = self._constructor(new_data)
        if should_transpose:
            result = result.T
        if inplace:
            return self._update_inplace(result)
        else:
            return result.__finalize__(self, method="interpolate")

    # ----------------------------------------------------------------------
    # Timeseries methods

    @final
    def asof(self, where, subset=None):
        """
        Return the last row(s) without any NaNs before `where`.

        The last row (for each element in `where`, if list) without any
        NaN is taken.
        In case of a :class:`~pandas.DataFrame`, the last row without NaN
        is taken, considering only the subset of columns (if not `None`).

        If there is no good value, NaN is returned for a Series, or
        a Series of NaN values for a DataFrame.

        Parameters
        ----------
        where : date or array-like of dates
            Date(s) before which the last row(s) are returned.
        subset : str or array-like of str, default `None`
            For DataFrame, if not `None`, only use these columns to
            check for NaNs.
Returns ------- scalar, Series, or DataFrame The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like Return scalar, Series, or DataFrame. See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30 NaN 2018-02-27 09:04:30 40 NaN """ if isinstance(where, str): where = Timestamp(where) if not self.index.is_monotonic_increasing: raise ValueError("asof requires a sorted index") is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError("subset is not valid for Series") else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq) if where < start: if not is_series: return self._constructor_sliced( index=self.columns, name=where, dtype=np.float64 ) return np.nan # It's always much faster to use a *while* loop here for # Series than pre-computing all the NAs. However a # *while* loop is extremely expensive for DataFrame # so we later pre-compute all the NAs and use the same # code path whether *where* is a scalar or list. # See PR: https://github.com/pandas-dev/pandas/pull/14476 if is_series: loc = self.index.searchsorted(where, side="right") if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(1) if nulls.all(): if is_series: self = cast("Series", self) return self._constructor(np.nan, index=where, name=self.name) elif is_list: self = cast("DataFrame", self) return self._constructor(np.nan, index=where, columns=self.columns) else: self = cast("DataFrame", self) return self._constructor_sliced( np.nan, index=self.columns, name=where[0] ) locs = self.index.asof_locs(where, ~(nulls._values)) # mask the missing missing = locs == -1 data = self.take(locs) data.index = where if missing.any(): # GH#16063 only do this setting when necessary, otherwise # we'd cast e.g. 
bools to floats data.loc[missing] = np.nan return data if is_list else data.iloc[-1] # ---------------------------------------------------------------------- # Action Methods @doc(klass=_shared_doc_kwargs["klass"]) def isna(self: NDFrameT) -> NDFrameT: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is an NA value. See Also -------- {klass}.isnull : Alias of isna. {klass}.notna : Boolean inverse of isna. {klass}.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool """ return isna(self).__finalize__(self, method="isna") @doc(isna, klass=_shared_doc_kwargs["klass"]) def isnull(self: NDFrameT) -> NDFrameT: return isna(self).__finalize__(self, method="isnull") @doc(klass=_shared_doc_kwargs["klass"]) def notna(self: NDFrameT) -> NDFrameT: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to True. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to False values. Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is not an NA value. See Also -------- {klass}.notnull : Alias of notna. {klass}.isna : Boolean inverse of notna. {klass}.dropna : Omit axes labels with missing values. notna : Top-level notna. Examples -------- Show which entries in a DataFrame are not NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.notna() age born name toy 0 True False True False 1 True True True True 2 False True True True Show which entries in a Series are not NA. 
>>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool """ return notna(self).__finalize__(self, method="notna") @doc(notna, klass=_shared_doc_kwargs["klass"]) def notnull(self: NDFrameT) -> NDFrameT: return notna(self).__finalize__(self, method="notnull") @final def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): if (lower is not None and np.any(isna(lower))) or ( upper is not None and np.any(isna(upper)) ): raise ValueError("Cannot use an NA value as a clip threshold") result = self mask = isna(self._values) with np.errstate(all="ignore"): if upper is not None: subset = self <= upper result = result.where(subset, upper, axis=None, inplace=False) if lower is not None: subset = self >= lower result = result.where(subset, lower, axis=None, inplace=False) if np.any(mask): result[mask] = np.nan if inplace: return self._update_inplace(result) else: return result @final def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) # method is self.le for upper bound and self.ge for lower bound if is_scalar(threshold) and is_number(threshold): if method.__name__ == "le": return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] # GH 40420 # Treat missing thresholds as no bounds, not clipping the values if is_list_like(threshold): fill_value = np.inf if method.__name__ == "le" else -np.inf threshold_inf = threshold.fillna(fill_value) else: threshold_inf = threshold subset = method(threshold_inf, axis=axis) | isna(self) # GH 40420 return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( self: NDFrameT, lower=None, upper=None, axis: Axis | None = None, inplace: bool_t = False, *args, **kwargs, ) -> NDFrameT | None: """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Thresholds can be singular values or array like, and in the latter case the clipping is performed element-wise in the specified axis. Parameters ---------- lower : float or array-like, default None Minimum threshold value. All values below this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. upper : float or array-like, default None Maximum threshold value. All values above this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. axis : int or str axis name, optional Align object with lower and upper along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame or None Same type as calling object with the values outside the clip boundaries replaced or None if ``inplace=True``. See Also -------- Series.clip : Trim values at input threshold in series. DataFrame.clip : Trim values at input threshold in dataframe. numpy.clip : Clip (limit) the values in an array. 
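
        Notes
        -----
        Conceptually, clipping with scalar bounds maps every element ``x`` to
        ``min(max(x, lower), upper)``; a bound left as ``None`` (or a missing
        value in an array-like threshold) leaves that side unbounded.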
Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3 Clips using specific lower threshold per column element, with missing values: >>> t = pd.Series([2, -4, np.NaN, 6, 3]) >>> t 0 2.0 1 -4.0 2 NaN 3 6.0 4 3.0 dtype: float64 >>> df.clip(t, axis=0) col_0 col_1 0 9 2 1 -3 -4 2 0 6 3 6 8 4 5 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = nv.validate_clip_with_axis(axis, args, kwargs) if axis is not None: axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN isna_lower = isna(lower) if not is_list_like(lower): if np.any(isna_lower): lower = None elif np.all(isna_lower): lower = None isna_upper = isna(upper) if not is_list_like(upper): if np.any(isna_upper): upper = None elif np.all(isna_upper): upper = None # GH 2747 (arguments were reversed) if ( lower is not None and upper is not None and is_scalar(lower) and is_scalar(upper) ): lower, upper = min(lower, upper), max(lower, upper) # fast-path for scalars if (lower is None or (is_scalar(lower) and is_number(lower))) and ( upper is None or (is_scalar(upper) and is_number(upper)) ): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound( lower, method=self.ge, axis=axis, inplace=inplace ) if upper is not None: if inplace: result = self result = result._clip_with_one_bound( upper, method=self.le, axis=axis, inplace=inplace ) return result @doc(**_shared_doc_kwargs) def asfreq( self: NDFrameT, freq, method=None, how: str | None = None, normalize: bool_t = False, fill_value=None, ) -> NDFrameT: """ Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index is the result of transforming the original index with :meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to ``pd.date_range(start, end, freq=freq)`` where ``start`` and ``end`` are, respectively, the first and last entries in the original index (see :func:`pandas.date_range`). The values corresponding to any timesteps in the new index which were not present in the original index will be null (``NaN``), unless a method for filling such unknowns is provided (see the ``method`` parameter below). The :meth:`resample` method is more appropriate if an operation on each group of timesteps (such as an aggregate) is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset or str Frequency DateOffset or string. method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill. 
how : {{'start', 'end'}}, default end For PeriodIndex only (see PeriodIndex.asfreq). normalize : bool, default False Whether to reset output index to midnight. fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- {klass} {klass} object reindexed to the specified frequency. See Also -------- reindex : Conform DataFrame to new index with optional filling logic. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({{'s': series}}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq( self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value, ) @final def at_time(self: NDFrameT, time, asof: bool_t = False, axis=None) -> NDFrameT: """ Select values at particular time of day (e.g., 9:30AM). Parameters ---------- time : datetime.time or str axis : {0 or 'index', 1 or 'columns'}, default 0 Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") indexer = index.indexer_at_time(time, asof=asof) return self._take_with_is_copy(indexer, axis=axis) @final def between_time( self: NDFrameT, start_time, end_time, include_start: bool_t | lib.NoDefault = lib.no_default, include_end: bool_t | lib.NoDefault = lib.no_default, inclusive: IntervalClosedType | None = None, axis=None, ) -> NDFrameT: """ Select values between particular times of the day (e.g., 9:00-9:30 AM). 
By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. include_start : bool, default True Whether the start time needs to be included in the result. .. deprecated:: 1.4.0 Arguments `include_start` and `include_end` have been deprecated to standardize boundary inputs. Use `inclusive` instead, to set each bound as closed or open. include_end : bool, default True Whether the end time needs to be included in the result. .. deprecated:: 1.4.0 Arguments `include_start` and `include_end` have been deprecated to standardize boundary inputs. Use `inclusive` instead, to set each bound as closed or open. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. Returns ------- Series or DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") old_include_arg_used = (include_start != lib.no_default) or ( include_end != lib.no_default ) if old_include_arg_used and inclusive is not None: raise ValueError( "Deprecated arguments `include_start` and `include_end` " "cannot be passed if `inclusive` has been given." 
) # If any of the deprecated arguments ('include_start', 'include_end') # have been passed elif old_include_arg_used: warnings.warn( "`include_start` and `include_end` are deprecated in " "favour of `inclusive`.", FutureWarning, stacklevel=find_stack_level(), ) left = True if isinstance(include_start, lib.NoDefault) else include_start right = True if isinstance(include_end, lib.NoDefault) else include_end inc_dict: dict[tuple[bool_t, bool_t], IntervalClosedType] = { (True, True): "both", (True, False): "left", (False, True): "right", (False, False): "neither", } inclusive = inc_dict[(left, right)] elif inclusive is None: # On arg removal inclusive can default to "both" inclusive = "both" left_inclusive, right_inclusive = validate_inclusive(inclusive) indexer = index.indexer_between_time( start_time, end_time, include_start=left_inclusive, include_end=right_inclusive, ) return self._take_with_is_copy(indexer, axis=axis) @doc(**_shared_doc_kwargs) def resample( self, rule, axis=0, closed: str | None = None, label: str | None = None, convention: str = "start", kind: str | None = None, loffset=None, base: int | None = None, on=None, level=None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, ) -> Resampler: """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or the caller must pass the label of a datetime-like series/index to the ``on``/``level`` keyword parameter. Parameters ---------- rule : DateOffset, Timedelta or str The offset string or object representing target conversion. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Which axis to use for up- or down-sampling. For `Series` this will default to 0, i.e. along the rows. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {{'start', 'end', 's', 'e'}}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. kind : {{'timestamp', 'period'}}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. loffset : timedelta, default None Adjust the resampled time labels. .. deprecated:: 1.1.0 You should add the loffset to the `df.index` after the resample. See below. base : int, default 0 For frequencies that evenly subdivide 1 day, the "origin" of the aggregated intervals. For example, for '5min' frequency, base could range from 0 through 4. Defaults to 0. .. deprecated:: 1.1.0 The new arguments that you should use are 'offset' or 'origin'. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. origin : Timestamp or str, default 'start_day' The timestamp on which to adjust the grouping. 
The timezone of origin must match the timezone of the index. If string, must be one of the following: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries .. versionadded:: 1.1.0 - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day .. versionadded:: 1.3.0 offset : Timedelta or str, default is None An offset timedelta added to the origin. .. versionadded:: 1.1.0 Returns ------- pandas.core.Resampler :class:`~pandas.core.Resampler` object. See Also -------- Series.resample : Resample a Series. DataFrame.resample : Resample a DataFrame. groupby : Group {klass} by mapping, function, label, or list of labels. asfreq : Reindex a {klass} with the given frequency without grouping. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__ for more. To learn more about the offset strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. >>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``pad`` method. >>> series.resample('30S').pad()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. 
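(An added aside, not part of the original docstring: ``bfill`` fills from the
*next* valid observation, the mirror image of ``pad`` above. Assuming the same
``series``, ``Resampler.fillna`` with ``'backfill'`` is an equivalent
spelling:)

>>> series.resample('30S').fillna('backfill')[0:5]
2000-01-01 00:00:00    0
2000-01-01 00:00:30    1
2000-01-01 00:01:00    1
2000-01-01 00:01:30    2
2000-01-01 00:02:00    2
Freq: 30S, dtype: int64

Using the ``bfill`` method directly: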
>>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(arraylike): ... return np.sum(arraylike) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df2 = pd.DataFrame( ... d2, ... index=pd.MultiIndex.from_product( ... [days, ['morning', 'afternoon']] ... ) ... 
) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 If you want to adjust the start of the bins based on a fixed timestamp: >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' >>> rng = pd.date_range(start, end, freq='7min') >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) >>> ts 2000-10-01 23:30:00 0 2000-10-01 23:37:00 3 2000-10-01 23:44:00 6 2000-10-01 23:51:00 9 2000-10-01 23:58:00 12 2000-10-02 00:05:00 15 2000-10-02 00:12:00 18 2000-10-02 00:19:00 21 2000-10-02 00:26:00 24 Freq: 7T, dtype: int64 >>> ts.resample('17min').sum() 2000-10-01 23:14:00 0 2000-10-01 23:31:00 9 2000-10-01 23:48:00 21 2000-10-02 00:05:00 54 2000-10-02 00:22:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='epoch').sum() 2000-10-01 23:18:00 0 2000-10-01 23:35:00 18 2000-10-01 23:52:00 27 2000-10-02 00:09:00 39 2000-10-02 00:26:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='2000-01-01').sum() 2000-10-01 23:24:00 3 2000-10-01 23:41:00 15 2000-10-01 23:58:00 45 2000-10-02 00:15:00 45 Freq: 17T, dtype: int64 If you want to adjust the start of the bins with an `offset` Timedelta, the two following lines are equivalent: >>> ts.resample('17min', origin='start').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', offset='23h30min').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 If you want to take the largest Timestamp as the end of the bins: >>> ts.resample('17min', origin='end').sum() 2000-10-01 23:35:00 0 2000-10-01 23:52:00 18 2000-10-02 00:09:00 27 2000-10-02 00:26:00 63 Freq: 17T, dtype: int64 In contrast with the `start_day`, you can use `end_day` to take the ceiling midnight of the largest Timestamp as the end of the bins and drop the bins not containing data: >>> ts.resample('17min', origin='end_day').sum() 2000-10-01 23:38:00 3 2000-10-01 23:55:00 15 2000-10-02 00:12:00 45 2000-10-02 00:29:00 45 Freq: 17T, dtype: int64 To replace the use of the deprecated `base` argument, you can now use `offset`, in this example it is equivalent to have `base=2`: >>> ts.resample('17min', offset='2min').sum() 2000-10-01 23:16:00 0 2000-10-01 23:33:00 9 2000-10-01 23:50:00 36 2000-10-02 00:07:00 39 2000-10-02 00:24:00 24 Freq: 17T, dtype: int64 To replace the use of the deprecated `loffset` argument: >>> from pandas.tseries.frequencies import to_offset >>> loffset = '19min' >>> ts_out = ts.resample('17min').sum() >>> ts_out.index = ts_out.index + to_offset(loffset) >>> ts_out 2000-10-01 23:33:00 0 2000-10-01 23:50:00 9 2000-10-02 00:07:00 21 2000-10-02 00:24:00 54 2000-10-02 00:41:00 24 Freq: 17T, dtype: int64 """ from pandas.core.resample import get_resampler axis = self._get_axis_number(axis) return get_resampler( self, freq=rule, label=label, closed=closed, axis=axis, kind=kind, loffset=loffset, convention=convention, base=base, key=on, level=level, origin=origin, offset=offset, ) @final def first(self: NDFrameT, offset) -> NDFrameT: """ Select initial periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. 
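For instance (an illustrative sketch added here, not part of the original
docstring), ``first('1M')`` keeps every row up to one month-end after the
first timestamp:

>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> pd.DataFrame({'A': [1, 2, 3, 4]}, index=i).first('1M')
            A
2018-04-09  1
2018-04-11  2
2018-04-13  3
2018-04-15  4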
Parameters ---------- offset : str, DateOffset or dateutil.relativedelta The offset length of the data that will be selected. For instance, '1M' will display all the rows having their index within the first month. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice that the data for the first 3 calendar days was returned, not the first 3 days observed in the dataset; therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self offset = to_offset(offset) if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]): # GH#29623 if first value is end of period, remove offset with n = 1 # before adding the real offset end_date = end = self.index[0] - offset.base + offset else: end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if isinstance(offset, Tick) and end_date in self.index: end = self.index.searchsorted(end_date, side="left") return self.iloc[:end] return self.loc[:end] @final def last(self: NDFrameT, offset) -> NDFrameT: """ Select final periods of time series data based on a date offset. For a DataFrame with a sorted DatetimeIndex, this function selects the last few rows based on a date offset. Parameters ---------- offset : str, DateOffset or dateutil.relativedelta The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice that the data for the last 3 calendar days was returned, not the last 3 observed days in the dataset; therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side="right") return self.iloc[start:] @final def rank( self: NDFrameT, axis=0, method: str = "average", numeric_only: bool_t | None | lib.NoDefault = lib.no_default, na_option: str = "keep", ascending: bool_t = True, pct: bool_t = False, ) -> NDFrameT: """ Compute numerical data ranks (1 through n) along axis. By default, equal values are assigned a rank that is the average of the ranks of those values.
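For instance (a minimal added sketch, not part of the original docstring),
two values tied for ranks 1 and 2 both receive the average rank 1.5:

>>> pd.Series([7, 7, 9]).rank()
0    1.5
1    1.5
2    3.0
dtype: float64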
Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Index to direct ranking. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' How to rank the group of records that have the same value (i.e. ties): * average: average rank of the group * min: lowest rank in the group * max: highest rank in the group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups. numeric_only : bool, optional For DataFrame objects, rank only numeric columns if set to True. na_option : {'keep', 'top', 'bottom'}, default 'keep' How to rank NaN values: * keep: assign NaN rank to NaN values * top: assign lowest rank to NaN values * bottom: assign highest rank to NaN values ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to display the returned rankings in percentile form. Returns ------- same type as caller Return a Series or DataFrame with data ranks as values. See Also -------- core.groupby.GroupBy.rank : Rank of values within each group. Examples -------- >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog', ... 'spider', 'snake'], ... 'Number_legs': [4, 2, 4, 8, np.nan]}) >>> df Animal Number_legs 0 cat 4.0 1 penguin 2.0 2 dog 4.0 3 spider 8.0 4 snake NaN Ties are assigned the mean of the ranks (by default) for the group. >>> s = pd.Series(range(5), index=list("abcde")) >>> s["d"] = s["b"] >>> s.rank() a 1.0 b 2.5 c 4.0 d 2.5 e 5.0 dtype: float64 The following example shows how the method behaves with the above parameters: * default_rank: this is the default behaviour obtained without using any parameter. * max_rank: setting ``method = 'max'`` the records that have the same values are ranked using the highest rank (e.g.: since 'cat' and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.) * NA_bottom: choosing ``na_option = 'bottom'``, if there are records with NaN values they are placed at the bottom of the ranking. * pct_rank: when setting ``pct = True``, the ranking is expressed as percentile rank. >>> df['default_rank'] = df['Number_legs'].rank() >>> df['max_rank'] = df['Number_legs'].rank(method='max') >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom') >>> df['pct_rank'] = df['Number_legs'].rank(pct=True) >>> df Animal Number_legs default_rank max_rank NA_bottom pct_rank 0 cat 4.0 2.5 3.0 2.5 0.625 1 penguin 2.0 1.0 1.0 1.0 0.250 2 dog 4.0 2.5 3.0 2.5 0.625 3 spider 8.0 4.0 4.0 4.0 1.000 4 snake NaN NaN NaN 5.0 NaN """ warned = False if numeric_only is None: # GH#45036 warnings.warn( f"'numeric_only=None' in {type(self).__name__}.rank is deprecated " "and will raise in a future version. Pass either 'True' or " "'False'. 'False' will be the default.", FutureWarning, stacklevel=find_stack_level(), ) warned = True elif numeric_only is lib.no_default: numeric_only = None axis = self._get_axis_number(axis) if na_option not in {"keep", "top", "bottom"}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): if data.ndim == 2: # i.e. DataFrame, we cast to ndarray values = data.values else: # i.e. 
Series, can dispatch to EA values = data._values if isinstance(values, ExtensionArray): ranks = values._rank( axis=axis, method=method, ascending=ascending, na_option=na_option, pct=pct, ) else: ranks = algos.rank( values, axis=axis, method=method, ascending=ascending, na_option=na_option, pct=pct, ) ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) return ranks_obj.__finalize__(self, method="rank") # if numeric_only is None, and we can't get anything, we try with # numeric_only=True if numeric_only is None: try: return ranker(self) except TypeError: numeric_only = True if not warned: # Only warn here if we didn't already issue a warning above # GH#45036 warnings.warn( f"Dropping of nuisance columns in {type(self).__name__}.rank " "is deprecated; in a future version this will raise TypeError. " "Select only valid columns before calling rank.", FutureWarning, stacklevel=find_stack_level(), ) if numeric_only: data = self._get_numeric_data() else: data = self return ranker(data) @doc(_shared_docs["compare"], klass=_shared_doc_kwargs["klass"]) def compare( self, other, align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, ): from pandas.core.reshape.concat import concat if type(self) is not type(other): cls_self, cls_other = type(self).__name__, type(other).__name__ raise TypeError( f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'" ) mask = ~((self == other) | (self.isna() & other.isna())) keys = ["self", "other"] if not keep_equal: self = self.where(mask) other = other.where(mask) if not keep_shape: if isinstance(self, ABCDataFrame): cmask = mask.any() rmask = mask.any(axis=1) self = self.loc[rmask, cmask] other = other.loc[rmask, cmask] else: self = self[mask] other = other[mask] if align_axis in (1, "columns"): # This is needed for Series axis = 1 else: axis = self._get_axis_number(align_axis) diff = concat([self, other], axis=axis, keys=keys) if axis >= self.ndim: # No need to reorganize data if stacking on new axis # This currently applies for stacking two Series on columns return diff ax = diff._get_axis(axis) ax_names = np.array(ax.names) # set index names to positions to avoid confusion ax.names = np.arange(len(ax_names)) # bring self-other to inner level order = list(range(1, ax.nlevels)) + [0] if isinstance(diff, ABCDataFrame): diff = diff.reorder_levels(order, axis=axis) else: diff = diff.reorder_levels(order) # restore the index names in order diff._get_axis(axis=axis).names = ax_names[order] # reorder axis to keep things organized indices = ( np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten() ) diff = diff.take(indices, axis=axis) return diff @doc(**_shared_doc_kwargs) def align( self, other, join="outer", axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, broadcast_axis=None, ): """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN Value to use for missing values. 
Defaults to NaN, but can be any "compatible" value. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: - pad / ffill: propagate last valid observation forward to next valid. - backfill / bfill: use NEXT valid observation to fill gap. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. fill_axis : {axes_single_arg}, default 0 Filling axis, method and limit. broadcast_axis : {axes_single_arg}, default None Broadcast values along this axis, if aligning two objects of different dimensions. Returns ------- (left, right) : ({klass}, type of other) Aligned objects. Examples -------- >>> df = pd.DataFrame( ... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2] ... ) >>> other = pd.DataFrame( ... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]], ... columns=["A", "B", "C", "D"], ... index=[2, 3, 4], ... ) >>> df D B E A 1 1 2 3 4 2 6 7 8 9 >>> other A B C D 2 10 20 30 40 3 60 70 80 90 4 600 700 800 900 Align on columns: >>> left, right = df.align(other, join="outer", axis=1) >>> left A B C D E 1 4 2 NaN 1 3 2 9 7 NaN 6 8 >>> right A B C D E 2 10 20 30 40 NaN 3 60 70 80 90 NaN 4 600 700 800 900 NaN We can also align on the index: >>> left, right = df.align(other, join="outer", axis=0) >>> left D B E A 1 1.0 2.0 3.0 4.0 2 6.0 7.0 8.0 9.0 3 NaN NaN NaN NaN 4 NaN NaN NaN NaN >>> right A B C D 1 NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 3 60.0 70.0 80.0 90.0 4 600.0 700.0 800.0 900.0 Finally, the default `axis=None` will align on both index and columns: >>> left, right = df.align(other, join="outer", axis=None) >>> left A B C D E 1 4.0 2.0 NaN 1.0 3.0 2 9.0 7.0 NaN 6.0 8.0 3 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN >>> right A B C D E 1 NaN NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 NaN 3 60.0 70.0 80.0 90.0 NaN 4 600.0 700.0 800.0 900.0 NaN """ method = missing.clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: if isinstance(self, ABCSeries): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim df = cons( {c: self for c in other.columns}, **other._construct_axes_dict() ) return df._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim df = cons( {c: other for c in self.columns}, **self._construct_axes_dict() ) return self._align_frame( df, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, ABCDataFrame): return self._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): return self._align_series( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") @final def 
_align_frame( self, other, join="outer", axis=None, level=None, copy: bool_t = True, fill_value=None, method=None, limit=None, fill_axis=0, ): # defaults join_index, join_columns = None, None ilidx, iridx = None, None clidx, cridx = None, None is_series = isinstance(self, ABCSeries) if (axis is None or axis == 0) and not self.index.equals(other.index): join_index, ilidx, iridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if ( (axis is None or axis == 1) and not is_series and not self.columns.equals(other.columns) ): join_columns, clidx, cridx = self.columns.join( other.columns, how=join, level=level, return_indexers=True ) if is_series: reindexers = {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers( reindexers, copy=copy, fill_value=fill_value, allow_dups=True ) # other must be always DataFrame right = other._reindex_with_indexers( {0: [join_index, iridx], 1: [join_columns, cridx]}, copy=copy, fill_value=fill_value, allow_dups=True, ) if method is not None: _left = left.fillna(method=method, axis=fill_axis, limit=limit) assert _left is not None # needed for mypy left = _left right = right.fillna(method=method, axis=fill_axis, limit=limit) # if DatetimeIndex have different tz, convert to UTC left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) @final def _align_series( self, other, join="outer", axis=None, level=None, copy: bool_t = True, fill_value=None, method=None, limit=None, fill_axis=0, ): is_series = isinstance(self, ABCSeries) if (not is_series and axis is None) or axis not in [None, 0, 1]: raise ValueError("Must specify axis=0 or 1") if is_series and axis == 1: raise ValueError("cannot align series to a series other than axis 0") # series/series compat, other must always be a Series if not axis: # equal if self.index.equals(other.index): join_index, lidx, ridx = None, None, None else: join_index, lidx, ridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if is_series: left = self._reindex_indexer(join_index, lidx, copy) elif lidx is None or join_index is None: left = self.copy() if copy else self else: left = self._constructor( self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) ) right = other._reindex_indexer(join_index, ridx, copy) else: # one has > 1 ndim fdata = self._mgr join_index = self.axes[1] lidx, ridx = None, None if not join_index.equals(other.index): join_index, lidx, ridx = join_index.join( other.index, how=join, level=level, return_indexers=True ) if lidx is not None: bm_axis = self._get_block_manager_axis(1) fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) if copy and fdata is self._mgr: fdata = fdata.copy() left = self._constructor(fdata) if ridx is None: right = other else: right = other.reindex(join_index, level=level) # fill fill_na = notna(fill_value) or (method is not None) if fill_na: left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) right = right.fillna(fill_value, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_series or (not is_series and axis == 0): left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) @final def _where( self, cond, other=lib.no_default, inplace=False, axis=None, level=None, errors="raise", ): """ Equivalent to public method `where`, except that `other` is not applied as a function even 
if callable. Used in __setitem__. """ inplace = validate_bool_kwarg(inplace, "inplace") if axis is not None: axis = self._get_axis_number(axis) # align the cond to same shape as myself cond = com.apply_if_callable(cond, self) if isinstance(cond, NDFrame): cond, _ = cond.align(self, join="right", broadcast_axis=1, copy=False) else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict()) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not cond.empty: if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) else: for dt in cond.dtypes: if not is_bool_dtype(dt): raise ValueError(msg.format(dtype=dt)) else: # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) # try to align with other if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: _, other = self.align( other, join="left", axis=axis, level=level, fill_value=None, copy=False, ) # if we are NOT aligned, raise as we cannot where index if axis is None and not other._indexed_same(self): raise InvalidIndexError elif other.ndim < self.ndim: # TODO(EA2D): avoid object-dtype cast in EA case GH#38729 other = other._values if axis == 0: other = np.reshape(other, (-1, 1)) elif axis == 1: other = np.reshape(other, (1, -1)) other = np.broadcast_to(other, self.shape) # slice me out of the other else: raise NotImplementedError( "cannot align with a higher dimensional NDFrame" ) elif not isinstance(other, (MultiIndex, NDFrame)): # mainly just catching Index here other = extract_array(other, extract_numpy=True) if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: if self.ndim != 1: # In the ndim == 1 case we may have # other length 1, which we treat as scalar (GH#2745, GH#4192) # or len(other) == icond.sum(), which we treat like # __setitem__ (GH#3235) raise ValueError( "other must be the same shape as self when an ndarray" ) # we are the same shape, so create an actual object for alignment else: other = self._constructor(other, **self._construct_axes_dict()) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask(mask=cond, new=other, align=align) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, ) result = self._constructor(new_data) return result.__finalize__(self) @doc( klass=_shared_doc_kwargs["klass"], cond="True", cond_rev="False", name="where", name_other="mask", ) def where( self, cond, other=np.nan, inplace=False, axis=None, level=None, errors="raise", try_cast=lib.no_default, ): """ Replace values where the condition is {cond_rev}. Parameters ---------- cond : bool {klass}, array-like, or callable Where `cond` is {cond}, keep the original value. Where {cond_rev}, replace with corresponding value from `other`. 
If `cond` is callable, it is computed on the {klass} and should return boolean {klass} or array. The callable must not change input {klass} (though pandas doesn't check it). other : scalar, {klass}, or callable Entries where `cond` is {cond_rev} are replaced with corresponding value from `other`. If other is callable, it is computed on the {klass} and should return scalar or {klass}. The callable must not change input {klass} (though pandas doesn't check it). inplace : bool, default False Whether to perform the operation in place on the data. axis : int, default None Alignment axis if needed. level : int, default None Alignment level if needed. errors : str, {{'raise', 'ignore'}}, default 'raise' Note that currently this parameter won't affect the results and will always coerce to a suitable dtype. - 'raise' : allow exceptions to be raised. - 'ignore' : suppress exceptions. On error return original object. try_cast : bool, default None Try to cast the result back to the input type (if possible). .. deprecated:: 1.3.0 Manually cast back if necessary. Returns ------- Same type as caller or None if ``inplace=True``. See Also -------- :func:`DataFrame.{name_other}` : Return an object of same shape as self. Notes ----- The {name} method is an application of the if-then idiom. For each element in the calling DataFrame, if ``cond`` is ``{cond}`` the element is used; otherwise the corresponding element from the DataFrame ``other`` is used. The signature for :func:`DataFrame.where` differs from :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. For further details and examples see the ``{name}`` documentation in :ref:`indexing <indexing.where_mask>`. Examples -------- >>> s = pd.Series(range(5)) >>> s.where(s > 0) 0 NaN 1 1.0 2 2.0 3 3.0 4 4.0 dtype: float64 >>> s.mask(s > 0) 0 0.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 >>> s.where(s > 1, 10) 0 10 1 10 2 2 3 3 4 4 dtype: int64 >>> s.mask(s > 1, 10) 0 0 1 1 2 10 3 10 4 10 dtype: int64 >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B']) >>> df A B 0 0 1 1 2 3 2 4 5 3 6 7 4 8 9 >>> m = df % 3 == 0 >>> df.where(m, -df) A B 0 0 -1 1 -2 3 2 -4 -5 3 6 -7 4 -8 9 >>> df.where(m, -df) == np.where(m, df, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True >>> df.where(m, -df) == df.mask(~m, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True """ other = com.apply_if_callable(other, self) if try_cast is not lib.no_default: warnings.warn( "try_cast keyword is deprecated and will be removed in a " "future version.", FutureWarning, stacklevel=find_stack_level(), ) return self._where(cond, other, inplace, axis, level, errors=errors) @doc( where, klass=_shared_doc_kwargs["klass"], cond="False", cond_rev="True", name="mask", name_other="where", ) def mask( self, cond, other=np.nan, inplace=False, axis=None, level=None, errors="raise", try_cast=lib.no_default, ): inplace = validate_bool_kwarg(inplace, "inplace") cond = com.apply_if_callable(cond, self) if try_cast is not lib.no_default: warnings.warn( "try_cast keyword is deprecated and will be removed in a " "future version.", FutureWarning, stacklevel=find_stack_level(), ) # see gh-21891 if not hasattr(cond, "__invert__"): cond = np.array(cond) return self.where( ~cond, other=other, inplace=inplace, axis=axis, level=level, errors=errors, ) @doc(klass=_shared_doc_kwargs["klass"]) def shift( self: NDFrameT, periods=1, freq=None, axis=0, fill_value=None ) -> NDFrameT: """ Shift index by desired number of periods with an 
optional time `freq`. When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be increased using the periods and the `freq`. `freq` can be inferred when specified as "infer" as long as either freq or inferred_freq attribute is set in the index. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. freq : DateOffset, tseries.offsets, timedelta, or str, optional Offset to use from the tseries module or time rule (e.g. 'EOM'). If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. If `freq` is specified as "infer" then it will be inferred from the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. axis : {{0 or 'index', 1 or 'columns', None}}, default None Shift direction. fill_value : object, optional The scalar value to use for newly introduced missing values. The default depends on the dtype of `self`. For numeric data, ``np.nan`` is used. For datetime, timedelta, or period data, etc. :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. .. versionchanged:: 1.1.0 Returns ------- {klass} Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. tshift : Shift the time index, using the index's frequency if available. Examples -------- >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45], ... "Col2": [13, 23, 18, 33, 48], ... "Col3": [17, 27, 22, 37, 52]}}, ...
index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 """ if periods == 0: return self.copy() if freq is None: # when freq is None, data is shifted, index is not axis = self._get_axis_number(axis) new_data = self._mgr.shift( periods=periods, axis=axis, fill_value=fill_value ) return self._constructor(new_data).__finalize__(self, method="shift") # when freq is given, index is shifted, data is not index = self._get_axis(axis) if freq == "infer": freq = getattr(index, "freq", None) if freq is None: freq = getattr(index, "inferred_freq", None) if freq is None: msg = "Freq was not set in the index hence cannot be inferred" raise ValueError(msg) elif isinstance(freq, str): freq = to_offset(freq) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq != orig_freq: assert orig_freq is not None # for mypy raise ValueError( f"Given freq {freq.rule_code} does not match " f"PeriodIndex freq {orig_freq.rule_code}" ) new_ax = index.shift(periods) else: new_ax = index.shift(periods, freq) result = self.set_axis(new_ax, axis=axis) return result.__finalize__(self, method="shift") @final def slice_shift(self: NDFrameT, periods: int = 1, axis=0) -> NDFrameT: """ Equivalent to `shift` without copying data. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. .. deprecated:: 1.2.0 slice_shift is deprecated, use DataFrame/Series.shift instead. Parameters ---------- periods : int Number of periods to move, can be positive or negative. Returns ------- shifted : same type as caller Notes ----- While the `slice_shift` is faster than `shift`, you may pay for it later during alignment. """ msg = ( "The 'slice_shift' method is deprecated " "and will be removed in a future version. " "You can use DataFrame/Series.shift instead." ) warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) if periods == 0: return self if periods > 0: vslicer = slice(None, -periods) islicer = slice(periods, None) else: vslicer = slice(-periods, None) islicer = slice(None, periods) new_obj = self._slice(vslicer, axis=axis) shifted_axis = self._get_axis(axis)[islicer] new_obj.set_axis(shifted_axis, axis=axis, inplace=True) return new_obj.__finalize__(self, method="slice_shift") @final def tshift(self: NDFrameT, periods: int = 1, freq=None, axis: Axis = 0) -> NDFrameT: """ Shift the time index, using the index's frequency if available. .. deprecated:: 1.1.0 Use `shift` instead. Parameters ---------- periods : int Number of periods to move, can be positive or negative. 
freq : DateOffset, timedelta, or str, default None Increment to use from the tseries module or time rule expressed as a string (e.g. 'EOM'). axis : {0 or 'index', 1 or 'columns', None}, default 0 Corresponds to the axis that contains the Index. Returns ------- shifted : Series/DataFrame Notes ----- If freq is not specified then tries to use the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. """ warnings.warn( ( "tshift is deprecated and will be removed in a future version. " "Please use shift instead." ), FutureWarning, stacklevel=find_stack_level(), ) if freq is None: freq = "infer" return self.shift(periods, freq, axis) def truncate( self: NDFrameT, before=None, after=None, axis=None, copy: bool_t = True ) -> NDFrameT: """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, str, int Truncate all rows before this index value. after : date, str, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. copy : bool, default True Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. >>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates.
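(A quick numeric check of that difference; an illustrative sketch, not part of
the original docstring. ``truncate`` stops at midnight of 2016-01-10, while
the partial-string slice runs through the end of that day:)

>>> len(df.truncate('2016-01-05', '2016-01-10'))
432001
>>> len(df.loc['2016-01-05':'2016-01-10'])
518400

The tail of the partial-string slice makes the difference visible: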
>>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax._is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None and before > after: raise ValueError(f"Truncate: {after} must be after {before}") if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1: before, after = after, before slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) if copy: result = result.copy() return result @final def tz_convert( self: NDFrameT, tz, axis=0, level=None, copy: bool_t = True ) -> NDFrameT: """ Convert tz-aware axis to target time zone. Parameters ---------- tz : str or tzinfo object axis : the axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. Returns ------- {klass} Object with time zone converted axis. Raises ------ TypeError If the axis is tz-naive. """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, "tz_convert"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_convert(ax, tz) result = self.copy(deep=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self, method="tz_convert") @final def tz_localize( self: NDFrameT, tz, axis=0, level=None, copy: bool_t = True, ambiguous="raise", nonexistent: str = "raise", ) -> NDFrameT: """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : str or tzinfo axis : the axis to localize level : int, str, default None If axis is a MultiIndex, localize a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled.
- 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise a NonExistentTimeError if there are nonexistent times. Returns ------- Series or DataFrame Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00'])) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), ... index=pd.DatetimeIndex(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly. >>> s = pd.Series(range(3), ... index=pd.DatetimeIndex(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backward with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', ...
'2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") if nonexistent not in nonexistent_options and not isinstance( nonexistent, timedelta ): raise ValueError( "The nonexistent argument must be one of 'raise', " "'NaT', 'shift_forward', 'shift_backward' or " "a timedelta object" ) axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, "tz_localize"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- # Numeric Methods @final def describe( self: NDFrameT, percentiles=None, include=None, exclude=None, datetime_is_numeric=False, ) -> NDFrameT: """ Generate descriptive statistics. Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. Here are the options: - 'all' : All columns of the input will be included in the output. - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit ``numpy.number``. To limit it instead to object columns submit the ``numpy.object`` data type. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types from the result. To exclude numeric types submit ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. 
Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. datetime_is_numeric : bool, default False Whether to treat datetime dtypes as numeric. This affects statistics calculated for the column. For DataFrame input, this also controls whether datetime columns are included by default. .. versionadded:: 1.1.0 Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. DataFrame.select_dtypes: Subset of a DataFrame including/excluding columns based on their dtype. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and upper percentiles. By default the lower percentile is ``25`` and the upper percentile is ``75``. The ``50`` percentile is the same as the median. For object data (e.g. strings or timestamps), the result's index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value's frequency. Timestamps also include the ``first`` and ``last`` items. If multiple object values have the highest count, then the ``count`` and ``top`` results will be arbitrarily chosen from among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to return only an analysis of numeric columns. If the dataframe consists only of object and categorical data without any numeric columns, the default is to return an analysis of both the object and categorical columns. If ``include='all'`` is provided as an option, the result will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. The parameters are ignored when analyzing a ``Series``. Examples -------- Describing a numeric ``Series``. >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 dtype: float64 Describing a categorical ``Series``. >>> s = pd.Series(['a', 'a', 'b', 'c']) >>> s.describe() count 4 unique 3 top a freq 2 dtype: object Describing a timestamp ``Series``. >>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) >>> s.describe(datetime_is_numeric=True) count 3 mean 2006-09-01 08:00:00 min 2000-01-01 00:00:00 25% 2004-12-31 12:00:00 50% 2010-01-01 00:00:00 75% 2010-01-01 00:00:00 max 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. 
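(An illustrative aside, not part of the original docstring: ``'all'`` takes
the union of the statistics produced for each dtype, so combinations that do
not apply to a column come back as ``NaN``. For example, ``unique`` is
reported for the categorical and object columns but not for the numeric one.)

>>> df.describe(include='all').loc['unique']  # doctest: +SKIP
categorical      3
numeric        NaN
object           3
Name: unique, dtype: object

The full union looks like this: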
>>> df.describe(include='all') # doctest: +SKIP categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN a freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[object]) # doctest: +SKIP object count 3 unique 3 top a freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top d freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) # doctest: +SKIP categorical object count 3 3 unique 3 3 top f a freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0 """ return describe_ndframe( obj=self, include=include, exclude=exclude, datetime_is_numeric=datetime_is_numeric, percentiles=percentiles, ) @final def pct_change( self: NDFrameT, periods=1, fill_method="pad", limit=None, freq=None, **kwargs, ) -> NDFrameT: """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by default. This is useful in comparing the percentage of change in a time series of elements. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. fill_method : str, default 'pad' How to handle NAs **before** computing percent changes. limit : int, default None The number of consecutive NAs to fill before stopping. freq : DateOffset, timedelta, or str, optional Increment to use from time series API (e.g. 'M' or BDay()). **kwargs Additional keyword arguments are passed into `DataFrame.shift` or `Series.shift`. Returns ------- chg : Series or DataFrame The same type as the calling object. See Also -------- Series.diff : Compute the difference of two elements in a Series. DataFrame.diff : Compute the difference of two elements in a DataFrame. Series.shift : Shift the index by some number of periods. DataFrame.shift : Shift the index by some number of periods. Examples -------- **Series** >>> s = pd.Series([90, 91, 85]) >>> s 0 90 1 91 2 85 dtype: int64 >>> s.pct_change() 0 NaN 1 0.011111 2 -0.065934 dtype: float64 >>> s.pct_change(periods=2) 0 NaN 1 NaN 2 -0.055556 dtype: float64 See the percentage change in a Series where filling NAs with last valid observation forward to next valid. >>> s = pd.Series([90, 91, None, 85]) >>> s 0 90.0 1 91.0 2 NaN 3 85.0 dtype: float64 >>> s.pct_change(fill_method='ffill') 0 NaN 1 0.011111 2 0.000000 3 -0.065934 dtype: float64 **DataFrame** Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = pd.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... 
index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 Percentage of change in GOOG and APPL stock volume. Shows computing the percentage change between columns. >>> df = pd.DataFrame({ ... '2016': [1769950, 30586265], ... '2015': [1500923, 40912316], ... '2014': [1371819, 41403351]}, ... index=['GOOG', 'APPL']) >>> df 2016 2015 2014 GOOG 1769950 1500923 1371819 APPL 30586265 40912316 41403351 >>> df.pct_change(axis='columns', periods=-1) 2016 2015 2014 GOOG 0.179241 0.094112 NaN APPL -0.252395 -0.011860 NaN """ axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: _data = self.fillna(method=fill_method, axis=axis, limit=limit) assert _data is not None # needed for mypy data = _data shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs) # Unsupported left operand type for / ("NDFrameT") rs = data / shifted - 1 # type: ignore[operator] if freq is not None: # Shift method is implemented differently when freq is not None # We want to restore the original index rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) return rs.__finalize__(self, method="pct_change") @final def _agg_by_level( self, name: str, axis: Axis = 0, level: Level = 0, skipna: bool_t = True, **kwargs, ): if axis is None: raise ValueError("Must specify 'axis' when aggregating by level.") grouped = self.groupby(level=level, axis=axis, sort=False) if hasattr(grouped, name) and skipna: return getattr(grouped, name)(**kwargs) axis = self._get_axis_number(axis) method = getattr(type(self), name) applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs) return grouped.aggregate(applyf) @final def _logical_func( self, name: str, func, axis: Axis = 0, bool_only: bool_t | None = None, skipna: bool_t = True, level: Level | None = None, **kwargs, ) -> Series | bool_t: nv.validate_logical_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if level is not None: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. df.any(level=1) should use df.groupby(level=1).any()", FutureWarning, stacklevel=find_stack_level(), ) if bool_only is not None: raise NotImplementedError( "Option bool_only is not implemented with option level." 
) return self._agg_by_level(name, axis=axis, level=level, skipna=skipna) if self.ndim > 1 and axis is None: # Reduce along one dimension then the other, to simplify DataFrame._reduce res = self._logical_func( name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs ) return res._logical_func(name, func, skipna=skipna, **kwargs) if ( self.ndim > 1 and axis == 1 and len(self._mgr.arrays) > 1 # TODO(EA2D): special-case not needed and all(x.ndim == 2 for x in self._mgr.arrays) and bool_only is not None and not kwargs ): # Fastpath avoiding potentially expensive transpose obj = self if bool_only: obj = self._get_bool_data() return obj._reduce_axis1(name, func, skipna=skipna) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=bool_only, filter_type="bool", ) def any( self, axis: Axis = 0, bool_only: bool_t | None = None, skipna: bool_t = True, level: Level | None = None, **kwargs, ) -> Series | bool_t: return self._logical_func( "any", nanops.nanany, axis, bool_only, skipna, level, **kwargs ) def all( self, axis: Axis = 0, bool_only: bool_t | None = None, skipna: bool_t = True, level: Level | None = None, **kwargs, ) -> Series | bool_t: return self._logical_func( "all", nanops.nanall, axis, bool_only, skipna, level, **kwargs ) @final def _accum_func( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs, ): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = self._stat_axis_number else: axis = self._get_axis_number(axis) if axis == 1: return self.T._accum_func( name, func, axis=0, skipna=skipna, *args, **kwargs ).T def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, "T") else blk_values result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, "T") else result return result result = self._mgr.apply(block_accum_func) return self._constructor(result).__finalize__(self, method=name) def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs ) def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs ) def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs) def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs) @final def _stat_function_ddof( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, ddof: int = 1, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: nv.validate_stat_ddof_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number if level is not None: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. 
df.var(level=1) should use df.groupby(level=1).var().", FutureWarning, stacklevel=find_stack_level(), ) return self._agg_by_level( name, axis=axis, level=level, skipna=skipna, ddof=ddof ) return self._reduce( func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, ddof: int = 1, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function_ddof( "sem", nanops.nansem, axis, skipna, level, ddof, numeric_only, **kwargs ) def var( self, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, ddof: int = 1, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function_ddof( "var", nanops.nanvar, axis, skipna, level, ddof, numeric_only, **kwargs ) def std( self, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, ddof: int = 1, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function_ddof( "std", nanops.nanstd, axis, skipna, level, ddof, numeric_only, **kwargs ) @final def _stat_function( self, name: str, func, axis: Axis | None | lib.NoDefault = None, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ): if name == "median": nv.validate_median((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None and level is None and self.ndim > 1: # user must have explicitly passed axis=None # GH#21597 warnings.warn( f"In a future version, DataFrame.{name}(axis=None) will return a " f"scalar {name} over the entire DataFrame. To retain the old " f"behavior, use 'frame.{name}(axis=0)' or just 'frame.{name}()'", FutureWarning, stacklevel=find_stack_level(), ) if axis is lib.no_default: axis = None if axis is None: axis = self._stat_axis_number axis = cast(Axis, axis) if level is not None: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. 
df.median(level=1) should use df.groupby(level=1).median().", FutureWarning, stacklevel=find_stack_level(), ) return self._agg_by_level( name, axis=axis, level=level, skipna=skipna, numeric_only=numeric_only ) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) def min( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ): return self._stat_function( "min", nanops.nanmin, axis, skipna, level, numeric_only, **kwargs, ) def max( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ): return self._stat_function( "max", nanops.nanmax, axis, skipna, level, numeric_only, **kwargs, ) def mean( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function( "mean", nanops.nanmean, axis, skipna, level, numeric_only, **kwargs ) def median( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function( "median", nanops.nanmedian, axis, skipna, level, numeric_only, **kwargs ) def skew( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function( "skew", nanops.nanskew, axis, skipna, level, numeric_only, **kwargs ) def kurt( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, **kwargs, ) -> Series | float: return self._stat_function( "kurt", nanops.nankurt, axis, skipna, level, numeric_only, **kwargs ) kurtosis = kurt @final def _min_count_stat_function( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, min_count: int = 0, **kwargs, ): if name == "sum": nv.validate_sum((), kwargs) elif name == "prod": nv.validate_prod((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number if level is not None: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. 
df.sum(level=1) should use df.groupby(level=1).sum().", FutureWarning, stacklevel=find_stack_level(), ) return self._agg_by_level( name, axis=axis, level=level, skipna=skipna, min_count=min_count, numeric_only=numeric_only, ) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, min_count=0, **kwargs, ): return self._min_count_stat_function( "sum", nanops.nansum, axis, skipna, level, numeric_only, min_count, **kwargs ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, numeric_only: bool_t | None = None, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "prod", nanops.nanprod, axis, skipna, level, numeric_only, min_count, **kwargs, ) product = prod def mad( self, axis: Axis | None = None, skipna: bool_t = True, level: Level | None = None, ) -> Series | float: """ {desc} Parameters ---------- axis : {axis_descr} Axis for the function to be applied on. skipna : bool, default True Exclude NA/null values when computing the result. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a {name1}. Returns ------- {name1} or {name2} (if level specified)\ {see_also}\ {examples} """ if not is_bool(skipna): warnings.warn( "Passing None for skipna is deprecated and will raise in a future " "version. Pass True instead. Only boolean values will be allowed " "in the future.", FutureWarning, stacklevel=find_stack_level(), ) skipna = True if axis is None: axis = self._stat_axis_number if level is not None: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. 
df.mad(level=1) should use df.groupby(level=1).mad()", FutureWarning, stacklevel=find_stack_level(), ) return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna) data = self._get_numeric_data() if axis == 0: demeaned = data - data.mean(axis=0) else: demeaned = data.sub(data.mean(axis=1), axis=0) return np.abs(demeaned).mean(axis=axis, skipna=skipna) @classmethod def _add_numeric_operations(cls): """ Add the operations to the cls; evaluate the doc strings again """ axis_descr, name1, name2 = _doc_params(cls) @doc( _bool_doc, desc=_any_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_any_see_also, examples=_any_examples, empty_value=False, ) def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs): return NDFrame.any(self, axis, bool_only, skipna, level, **kwargs) setattr(cls, "any", any) @doc( _bool_doc, desc=_all_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_all_see_also, examples=_all_examples, empty_value=True, ) def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs): return NDFrame.all(self, axis, bool_only, skipna, level, **kwargs) setattr(cls, "all", all) # error: Argument 1 to "doc" has incompatible type "Optional[str]"; expected # "Union[str, Callable[..., Any]]" @doc( NDFrame.mad.__doc__, # type: ignore[arg-type] desc="Return the mean absolute deviation of the values " "over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, see_also="", examples="", ) def mad(self, axis=None, skipna=True, level=None): return NDFrame.mad(self, axis, skipna, level) setattr(cls, "mad", mad) @doc( _num_ddof_doc, desc="Return unbiased standard error of the mean over requested " "axis.\n\nNormalized by N-1 by default. This can be changed " "using the ddof argument", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples="", ) def sem( self, axis=None, skipna=True, level=None, ddof=1, numeric_only=None, **kwargs, ): return NDFrame.sem(self, axis, skipna, level, ddof, numeric_only, **kwargs) setattr(cls, "sem", sem) @doc( _num_ddof_doc, desc="Return unbiased variance over requested axis.\n\nNormalized by " "N-1 by default. This can be changed using the ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples=_var_examples, ) def var( self, axis=None, skipna=True, level=None, ddof=1, numeric_only=None, **kwargs, ): return NDFrame.var(self, axis, skipna, level, ddof, numeric_only, **kwargs) setattr(cls, "var", var) @doc( _num_ddof_doc, desc="Return sample standard deviation over requested axis." "\n\nNormalized by N-1 by default. 
This can be changed using the " "ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes=_std_notes, examples=_std_examples, ) def std( self, axis=None, skipna=True, level=None, ddof=1, numeric_only=None, **kwargs, ): return NDFrame.std(self, axis, skipna, level, ddof, numeric_only, **kwargs) setattr(cls, "std", std) @doc( _cnum_doc, desc="minimum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="min", examples=_cummin_examples, ) def cummin(self, axis=None, skipna=True, *args, **kwargs): return NDFrame.cummin(self, axis, skipna, *args, **kwargs) setattr(cls, "cummin", cummin) @doc( _cnum_doc, desc="maximum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="max", examples=_cummax_examples, ) def cummax(self, axis=None, skipna=True, *args, **kwargs): return NDFrame.cummax(self, axis, skipna, *args, **kwargs) setattr(cls, "cummax", cummax) @doc( _cnum_doc, desc="sum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="sum", examples=_cumsum_examples, ) def cumsum(self, axis=None, skipna=True, *args, **kwargs): return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) setattr(cls, "cumsum", cumsum) @doc( _cnum_doc, desc="product", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="prod", examples=_cumprod_examples, ) def cumprod(self, axis=None, skipna=True, *args, **kwargs): return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) setattr(cls, "cumprod", cumprod) @doc( _num_doc, desc="Return the sum of the values over the requested axis.\n\n" "This is equivalent to the method ``numpy.sum``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_sum_examples, ) def sum( self, axis=None, skipna=True, level=None, numeric_only=None, min_count=0, **kwargs, ): return NDFrame.sum( self, axis, skipna, level, numeric_only, min_count, **kwargs ) setattr(cls, "sum", sum) @doc( _num_doc, desc="Return the product of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_prod_examples, ) def prod( self, axis=None, skipna=True, level=None, numeric_only=None, min_count=0, **kwargs, ): return NDFrame.prod( self, axis, skipna, level, numeric_only, min_count, **kwargs ) setattr(cls, "prod", prod) cls.product = prod @doc( _num_doc, desc="Return the mean of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def mean( self, axis: int | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.mean(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "mean", mean) @doc( _num_doc, desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def skew( self, axis: int | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.skew(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "skew", skew) @doc( _num_doc, desc="Return unbiased kurtosis over requested axis.\n\n" "Kurtosis obtained using Fisher's definition of\n" "kurtosis (kurtosis of normal == 0.0). 
Normalized " "by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def kurt( self, axis: Axis | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.kurt(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "kurt", kurt) cls.kurtosis = kurt @doc( _num_doc, desc="Return the median of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def median( self, axis: int | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.median(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "median", median) # error: Untyped decorator makes function "max" untyped @doc( # type: ignore[misc] _num_doc, desc="Return the maximum of the values over the requested axis.\n\n" "If you want the *index* of the maximum, use ``idxmax``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmax``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_max_examples, ) def max( self, axis: int | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.max(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "max", max) # error: Untyped decorator makes function "max" untyped @doc( # type: ignore[misc] _num_doc, desc="Return the minimum of the values over the requested axis.\n\n" "If you want the *index* of the minimum, use ``idxmin``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmin``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_min_examples, ) def min( self, axis: int | None | lib.NoDefault = lib.no_default, skipna=True, level=None, numeric_only=None, **kwargs, ): return NDFrame.min(self, axis, skipna, level, numeric_only, **kwargs) setattr(cls, "min", min) @final @doc(Rolling) def rolling( self, window: int | timedelta | BaseOffset | BaseIndexer, min_periods: int | None = None, center: bool_t = False, win_type: str | None = None, on: str | None = None, axis: Axis = 0, closed: str | None = None, step: int | None = None, method: str = "single", ): axis = self._get_axis_number(axis) if win_type is not None: return Window( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) return Rolling( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) @final @doc(Expanding) def expanding( self, min_periods: int = 1, center: bool_t | None = None, axis: Axis = 0, method: str = "single", ) -> Expanding: axis = self._get_axis_number(axis) if center is not None: warnings.warn( "The `center` argument on `expanding` will be removed in the future.", FutureWarning, stacklevel=find_stack_level(), ) else: center = False return Expanding( self, min_periods=min_periods, center=center, axis=axis, method=method ) @final @doc(ExponentialMovingWindow) def ewm( self, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool_t = True, ignore_na: bool_t = False, axis: Axis = 0, times: str | np.ndarray | DataFrame | Series | None = None, method: str = "single", ) -> ExponentialMovingWindow: axis = 
self._get_axis_number(axis) return ExponentialMovingWindow( self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis, times=times, method=method, ) # ---------------------------------------------------------------------- # Arithmetic Methods @final def _inplace_method(self, other, op): """ Wrap arithmetic method to operate inplace. """ result = op(self, other) if ( self.ndim == 1 and result._indexed_same(self) and is_dtype_equal(result.dtype, self.dtype) ): # GH#36498 this inplace op can _actually_ be inplace. self._values[:] = result._values return self # Delete cacher self._reset_cacher() # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace( result.reindex_like(self, copy=False), verify_is_copy=False ) return self def __iadd__(self, other): # error: Unsupported left operand type for + ("Type[NDFrame]") return self._inplace_method(other, type(self).__add__) # type: ignore[operator] def __isub__(self, other): # error: Unsupported left operand type for - ("Type[NDFrame]") return self._inplace_method(other, type(self).__sub__) # type: ignore[operator] def __imul__(self, other): # error: Unsupported left operand type for * ("Type[NDFrame]") return self._inplace_method(other, type(self).__mul__) # type: ignore[operator] def __itruediv__(self, other): # error: Unsupported left operand type for / ("Type[NDFrame]") return self._inplace_method( other, type(self).__truediv__ # type: ignore[operator] ) def __ifloordiv__(self, other): # error: Unsupported left operand type for // ("Type[NDFrame]") return self._inplace_method( other, type(self).__floordiv__ # type: ignore[operator] ) def __imod__(self, other): # error: Unsupported left operand type for % ("Type[NDFrame]") return self._inplace_method(other, type(self).__mod__) # type: ignore[operator] def __ipow__(self, other): # error: Unsupported left operand type for ** ("Type[NDFrame]") return self._inplace_method(other, type(self).__pow__) # type: ignore[operator] def __iand__(self, other): # error: Unsupported left operand type for & ("Type[NDFrame]") return self._inplace_method(other, type(self).__and__) # type: ignore[operator] def __ior__(self, other): # error: Unsupported left operand type for | ("Type[NDFrame]") return self._inplace_method(other, type(self).__or__) # type: ignore[operator] def __ixor__(self, other): # error: Unsupported left operand type for ^ ("Type[NDFrame]") return self._inplace_method(other, type(self).__xor__) # type: ignore[operator] # ---------------------------------------------------------------------- # Misc methods @final def _find_valid_index(self, *, how: str) -> Hashable | None: """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ idxpos = find_valid_index(self._values, how=how) if idxpos is None: return None return self.index[idxpos] @final @doc(position="first", klass=_shared_doc_kwargs["klass"]) def first_valid_index(self) -> Hashable | None: """ Return index for {position} non-NA value or None, if no non-NA value is found. Returns ------- scalar : type of index Notes ----- If all elements are NA/null, returns None. Also returns None for empty {klass}. 
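Examples
--------
An illustrative sketch (editor's addition, not part of the original
docstring); note the return value is the index label of the valid
entry, not its positional offset:

>>> s = pd.Series([None, 3, 4])
>>> s.first_valid_index()
1
>>> s.last_valid_index()
2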
""" return self._find_valid_index(how="first") @final @doc(first_valid_index, position="last", klass=_shared_doc_kwargs["klass"]) def last_valid_index(self) -> Hashable | None: return self._find_valid_index(how="last") def _doc_params(cls): """Return a tuple of the doc params.""" axis_descr = ( f"{{{', '.join([f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS)])}}}" ) name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar" name2 = cls.__name__ return axis_descr, name, name2 _num_doc = """ {desc} Parameters ---------- axis : {axis_descr} Axis for the function to be applied on. skipna : bool, default True Exclude NA/null values when computing the result. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a {name1}. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. {min_count}\ **kwargs Additional keyword arguments to be passed to the function. Returns ------- {name1} or {name2} (if level specified)\ {see_also}\ {examples} """ _num_ddof_doc = """ {desc} Parameters ---------- axis : {axis_descr} skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a {name1}. ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- {name1} or {name2} (if level specified) \ {notes}\ {examples} """ _std_notes = """ Notes ----- To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the default `ddof=1`)""" _std_examples = """ Examples -------- >>> df = pd.DataFrame({'person_id': [0, 1, 2, 3], ... 'age': [21, 25, 62, 43], ... 'height': [1.61, 1.87, 1.49, 2.01]} ... ).set_index('person_id') >>> df age height person_id 0 21 1.61 1 25 1.87 2 62 1.49 3 43 2.01 The standard deviation of the columns can be found as follows: >>> df.std() age 18.786076 height 0.237417 Alternatively, `ddof=0` can be set to normalize by N instead of N-1: >>> df.std(ddof=0) age 16.269219 height 0.205609""" _var_examples = """ Examples -------- >>> df = pd.DataFrame({'person_id': [0, 1, 2, 3], ... 'age': [21, 25, 62, 43], ... 'height': [1.61, 1.87, 1.49, 2.01]} ... ).set_index('person_id') >>> df age height person_id 0 21 1.61 1 25 1.87 2 62 1.49 3 43 2.01 >>> df.var() age 352.916667 height 0.056367 Alternatively, ``ddof=0`` can be set to normalize by N instead of N-1: >>> df.var(ddof=0) age 264.687500 height 0.042275""" _bool_doc = """ {desc} Parameters ---------- axis : {{0 or 'index', 1 or 'columns', None}}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. * 1 / 'columns' : reduce the columns, return a Series whose index is the original index. * None : reduce all axes, return a scalar. bool_only : bool, default None Include only boolean columns. If None, will attempt to use everything, then use only boolean data. Not implemented for Series. skipna : bool, default True Exclude NA/null values. 
If the entire row/column is NA and skipna is True, then the result will be {empty_value}, as for an empty row/column. If skipna is False, then NA are treated as True, because these are not equal to zero. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a {name1}. **kwargs : any, default None Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- {name1} or {name2} If level is specified, then, {name2} is returned; otherwise, {name1} is returned. {see_also} {examples}""" _all_desc = """\ Return whether all elements are True, potentially over an axis. Returns True unless there is at least one element within a series or along a Dataframe axis that is False or equivalent (e.g. zero or empty).""" _all_examples = """\ Examples -------- **Series** >>> pd.Series([True, True]).all() True >>> pd.Series([True, False]).all() False >>> pd.Series([], dtype="float64").all() True >>> pd.Series([np.nan]).all() True >>> pd.Series([np.nan]).all(skipna=False) True **DataFrames** Create a dataframe from a dictionary. >>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]}) >>> df col1 col2 0 True True 1 True False Default behaviour checks if column-wise values all return True. >>> df.all() col1 True col2 False dtype: bool Specify ``axis='columns'`` to check if row-wise values all return True. >>> df.all(axis='columns') 0 True 1 False dtype: bool Or ``axis=None`` for whether every value is True. >>> df.all(axis=None) False """ _all_see_also = """\ See Also -------- Series.all : Return True if all elements are True. DataFrame.any : Return True if one (or more) elements are True. """ _cnum_doc = """ Return cumulative {desc} over a DataFrame or Series axis. Returns a DataFrame or Series of the same size containing the cumulative {desc}. Parameters ---------- axis : {{0 or 'index', 1 or 'columns'}}, default 0 The index or the name of the axis. 0 is equivalent to None or 'index'. skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- {name1} or {name2} Return cumulative {desc} of {name1} or {name2}. See Also -------- core.window.Expanding.{accum_func_name} : Similar functionality but ignores ``NaN`` values. {name2}.{accum_func_name} : Return the {desc} over {name2} axis. {name2}.cummax : Return cumulative maximum over {name2} axis. {name2}.cummin : Return cumulative minimum over {name2} axis. {name2}.cumsum : Return cumulative sum over {name2} axis. {name2}.cumprod : Return cumulative product over {name2} axis. {examples}""" _cummin_examples = """\ Examples -------- **Series** >>> s = pd.Series([2, np.nan, 5, -1, 0]) >>> s 0 2.0 1 NaN 2 5.0 3 -1.0 4 0.0 dtype: float64 By default, NA values are ignored. >>> s.cummin() 0 2.0 1 NaN 2 2.0 3 -1.0 4 -1.0 dtype: float64 To include NA values in the operation, use ``skipna=False`` >>> s.cummin(skipna=False) 0 2.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 **DataFrame** >>> df = pd.DataFrame([[2.0, 1.0], ... [3.0, np.nan], ... [1.0, 0.0]], ... columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the minimum in each column. This is equivalent to ``axis=None`` or ``axis='index'``. 
>>> df.cummin() A B 0 2.0 1.0 1 2.0 NaN 2 1.0 0.0 To iterate over columns and find the minimum in each row, use ``axis=1`` >>> df.cummin(axis=1) A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 """ _cumsum_examples = """\ Examples -------- **Series** >>> s = pd.Series([2, np.nan, 5, -1, 0]) >>> s 0 2.0 1 NaN 2 5.0 3 -1.0 4 0.0 dtype: float64 By default, NA values are ignored. >>> s.cumsum() 0 2.0 1 NaN 2 7.0 3 6.0 4 6.0 dtype: float64 To include NA values in the operation, use ``skipna=False`` >>> s.cumsum(skipna=False) 0 2.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 **DataFrame** >>> df = pd.DataFrame([[2.0, 1.0], ... [3.0, np.nan], ... [1.0, 0.0]], ... columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the sum in each column. This is equivalent to ``axis=None`` or ``axis='index'``. >>> df.cumsum() A B 0 2.0 1.0 1 5.0 NaN 2 6.0 1.0 To iterate over columns and find the sum in each row, use ``axis=1`` >>> df.cumsum(axis=1) A B 0 2.0 3.0 1 3.0 NaN 2 1.0 1.0 """ _cumprod_examples = """\ Examples -------- **Series** >>> s = pd.Series([2, np.nan, 5, -1, 0]) >>> s 0 2.0 1 NaN 2 5.0 3 -1.0 4 0.0 dtype: float64 By default, NA values are ignored. >>> s.cumprod() 0 2.0 1 NaN 2 10.0 3 -10.0 4 -0.0 dtype: float64 To include NA values in the operation, use ``skipna=False`` >>> s.cumprod(skipna=False) 0 2.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 **DataFrame** >>> df = pd.DataFrame([[2.0, 1.0], ... [3.0, np.nan], ... [1.0, 0.0]], ... columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the product in each column. This is equivalent to ``axis=None`` or ``axis='index'``. >>> df.cumprod() A B 0 2.0 1.0 1 6.0 NaN 2 6.0 0.0 To iterate over columns and find the product in each row, use ``axis=1`` >>> df.cumprod(axis=1) A B 0 2.0 2.0 1 3.0 NaN 2 1.0 0.0 """ _cummax_examples = """\ Examples -------- **Series** >>> s = pd.Series([2, np.nan, 5, -1, 0]) >>> s 0 2.0 1 NaN 2 5.0 3 -1.0 4 0.0 dtype: float64 By default, NA values are ignored. >>> s.cummax() 0 2.0 1 NaN 2 5.0 3 5.0 4 5.0 dtype: float64 To include NA values in the operation, use ``skipna=False`` >>> s.cummax(skipna=False) 0 2.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 **DataFrame** >>> df = pd.DataFrame([[2.0, 1.0], ... [3.0, np.nan], ... [1.0, 0.0]], ... columns=list('AB')) >>> df A B 0 2.0 1.0 1 3.0 NaN 2 1.0 0.0 By default, iterates over rows and finds the maximum in each column. This is equivalent to ``axis=None`` or ``axis='index'``. >>> df.cummax() A B 0 2.0 1.0 1 3.0 NaN 2 3.0 1.0 To iterate over columns and find the maximum in each row, use ``axis=1`` >>> df.cummax(axis=1) A B 0 2.0 2.0 1 3.0 NaN 2 1.0 1.0 """ _any_see_also = """\ See Also -------- numpy.any : Numpy version of this method. Series.any : Return whether any element is True. Series.all : Return whether all elements are True. DataFrame.any : Return whether any element is True over requested axis. DataFrame.all : Return whether all elements are True over requested axis. """ _any_desc = """\ Return whether any element is True, potentially over an axis. Returns False unless there is at least one element within a series or along a Dataframe axis that is True or equivalent (e.g. non-zero or non-empty).""" _any_examples = """\ Examples -------- **Series** For Series input, the output is a scalar indicating whether any element is True. 
>>> pd.Series([False, False]).any() False >>> pd.Series([True, False]).any() True >>> pd.Series([], dtype="float64").any() False >>> pd.Series([np.nan]).any() False >>> pd.Series([np.nan]).any(skipna=False) True **DataFrame** Whether each column contains at least one True element (the default). >>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]}) >>> df A B C 0 1 0 0 1 2 2 0 >>> df.any() A True B True C False dtype: bool Aggregating over the columns. >>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]}) >>> df A B 0 True 1 1 False 2 >>> df.any(axis='columns') 0 True 1 True dtype: bool >>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]}) >>> df A B 0 True 1 1 False 0 >>> df.any(axis='columns') 0 True 1 False dtype: bool Aggregating over the entire DataFrame with ``axis=None``. >>> df.any(axis=None) True `any` for an empty DataFrame is an empty Series. >>> pd.DataFrame([]).any() Series([], dtype: bool) """ _shared_docs[ "stat_func_example" ] = """ Examples -------- >>> idx = pd.MultiIndex.from_arrays([ ... ['warm', 'warm', 'cold', 'cold'], ... ['dog', 'falcon', 'fish', 'spider']], ... names=['blooded', 'animal']) >>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx) >>> s blooded animal warm dog 4 falcon 2 cold fish 0 spider 8 Name: legs, dtype: int64 >>> s.{stat_func}() {default_output}""" _sum_examples = _shared_docs["stat_func_example"].format( stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8 ) _sum_examples += """ By default, the sum of an empty or all-NA Series is ``0``. >>> pd.Series([], dtype="float64").sum() # min_count=0 is the default 0.0 This can be controlled with the ``min_count`` parameter. For example, if you'd like the sum of an empty series to be NaN, pass ``min_count=1``. >>> pd.Series([], dtype="float64").sum(min_count=1) nan Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and empty series identically. >>> pd.Series([np.nan]).sum() 0.0 >>> pd.Series([np.nan]).sum(min_count=1) nan""" _max_examples = _shared_docs["stat_func_example"].format( stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8 ) _min_examples = _shared_docs["stat_func_example"].format( stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0 ) _stat_func_see_also = """ See Also -------- Series.sum : Return the sum. Series.min : Return the minimum. Series.max : Return the maximum. Series.idxmin : Return the index of the minimum. Series.idxmax : Return the index of the maximum. DataFrame.sum : Return the sum over the requested axis. DataFrame.min : Return the minimum over the requested axis. DataFrame.max : Return the maximum over the requested axis. DataFrame.idxmin : Return the index of the minimum over the requested axis. DataFrame.idxmax : Return the index of the maximum over the requested axis.""" _prod_examples = """ Examples -------- By default, the product of an empty or all-NA Series is ``1`` >>> pd.Series([], dtype="float64").prod() 1.0 This can be controlled with the ``min_count`` parameter >>> pd.Series([], dtype="float64").prod(min_count=1) nan Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and empty series identically. >>> pd.Series([np.nan]).prod() 1.0 >>> pd.Series([np.nan]).prod(min_count=1) nan""" _min_count_stub = """\ min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. 
""" def _align_as_utc( left: NDFrameT, right: NDFrameT, join_index: Index | None ) -> tuple[NDFrameT, NDFrameT]: """ If we are aligning timezone-aware DatetimeIndexes and the timezones do not match, convert both to UTC. """ if is_datetime64tz_dtype(left.index.dtype): if left.index.tz != right.index.tz: if join_index is not None: # GH#33671 ensure we don't change the index on # our original Series (NB: by default deep=False) left = left.copy() right = right.copy() left.index = join_index right.index = join_index return left, right
##################### generated by xml-casa (v2) from ptclean6.xml ################## ##################### 6a89d05724a14fedd7b8ceb75d841936 ############################## from __future__ import absolute_import from casashell.private.stack_manip import find_local as __sf__ from casashell.private.stack_manip import find_frame as _find_frame from casatools.typecheck import validator as _pc from casatools.coercetype import coerce as _coerce from suncasatasks import ptclean6 as _ptclean6_t from collections import OrderedDict import numpy import sys import os import shutil def static_var(varname, value): def decorate(func): setattr(func, varname, value) return func return decorate class _ptclean6: """ ptclean6 ---- Parallelized tclean in consecutive time steps Parallelized clean in consecutive time steps. Packed over CASA 6 tclean. --------- parameter descriptions --------------------------------------------- vis Name(s) of input visibility file(s) default: none; example: vis='ngc5921.ms' vis=['ngc5921a.ms','ngc5921b.ms']; multiple MSes imageprefix Prefix of output image names (usually useful in defining the output path) imagesuffix Suffix of output image names (usually useful in specifying the image type, version, etc.) ncpu Number of cpu cores to use twidth Number of time pixels to average doreg True if using vla_prep to register the image usephacenter True if using the phase center information from the measurement set (e.g., VLA); False to assume the phase center is at the solar disk center (EOVSA) reftime Reference time of the J2000 coordinates associated with the ephemeris target. e.g., "2012/03/03/12:00". This is used for helioimage2fits.py to find the solar x y offset in order to register the image. If not set, use the actual timerange of the image (default) toTb True if converting to brightness temperature sclfactor Scale the brightness temperature up by this value subregion The name of a CASA image or region file or region string. Only locations within the region will be output to the fits file. If regions specified fall completely outside of the image, ptclean6 will throw an error. Manual mask options/examples : subregion='box[[224pix,224pix],[288pix,288pix]]' : A CASA region string. docompress True if compressing the output FITS files overwrite True if overwriting the image selectdata Enable data selection parameters. field Select fields to image or mosaic. Use field id(s) or name(s). ['go listobs' to obtain the list id's or names] default: ''= all fields If field string is a non-negative integer, it is assumed to be a field index otherwise, it is assumed to be a field name field='0~2'; field ids 0,1,2 field='0,4,5~7'; field ids 0,4,5,6,7 field='3C286,3C295'; field named 3C286 and 3C295 field = '3,4C*'; field id 3, all names starting with 4C For multiple MS input, a list of field strings can be used: field = ['0~2','0~4']; field ids 0-2 for the first MS and 0-4 for the second field = '0~2'; field ids 0-2 for all input MSes spw Select spectral window/channels NOTE: channels de-selected here will contain all zeros if selected by the parameter mode subparameters. default: ''=all spectral windows and channels spw='0~2,4'; spectral windows 0,1,2,4 (all channels) spw='0:5~61'; spw 0, channels 5 to 61 spw='<2'; spectral windows less than 2 (i.e. 0,1) spw='0,10,3:3~45'; spw 0,10 all channels, spw 3, channels 3 to 45. spw='0~2:2~6'; spw 0,1,2 with channels 2 through 6 in each. 
For multiple MS input, a list of spw strings can be used: spw=['0','0~3']; spw ids 0 for the first MS and 0-3 for the second spw='0~3'; spw ids 0-3 for all input MSes spw='3:10~20;50~60' for multiple channel ranges within spw id 3 spw='3:10~20;50~60,4:0~30' for different channel ranges for spw ids 3 and 4 spw='0:0~10,1:20~30,2:1;2;3'; spw 0, channels 0-10, spw 1, channels 20-30, and spw 2, channels, 1,2 and 3 spw='1~4;6:15~48' for channels 15 through 48 for spw ids 1,2,3,4 and 6 timerange Range of time to select from data default: '' (all); examples, timerange = 'YYYY/MM/DD/hh:mm:ss~YYYY/MM/DD/hh:mm:ss' Note: if YYYY/MM/DD is missing date defaults to first day in data set timerange='09:14:0~09:54:0' picks 40 min on first day timerange='25:00:00~27:30:00' picks 1 hr to 3 hr 30min on NEXT day timerange='09:44:00' pick data within one integration of time timerange='> 10:24:00' data after this time For multiple MS input, a list of timerange strings can be used: timerange=['09:14:0~09:54:0','> 10:24:00'] timerange='09:14:0~09:54:0'; apply the same timerange for all input MSes uvrange Select data within uvrange (default unit is meters) default: '' (all); example: uvrange='0~1000klambda'; uvrange from 0-1000 kilo-lambda uvrange='> 4klambda'; uvranges greater than 4 kilo lambda For multiple MS input, a list of uvrange strings can be used: uvrange=['0~1000klambda','100~1000klambda'] uvrange='0~1000klambda'; apply 0-1000 kilo-lambda for all input MSes antenna Select data based on antenna/baseline default: '' (all) If antenna string is a non-negative integer, it is assumed to be an antenna index, otherwise, it is considered an antenna name. antenna='5&6'; baseline between antenna index 5 and index 6. antenna='VA05&VA06'; baseline between VLA antenna 5 and 6. antenna='5&6;7&8'; baselines 5-6 and 7-8 antenna='5'; all baselines with antenna index 5 antenna='05'; all baselines with antenna number 05 (VLA old name) antenna='5,6,9'; all baselines with antennas 5,6,9 index number For multiple MS input, a list of antenna strings can be used: antenna=['5','5&6']; antenna='5'; antenna index 5 for all input MSes antenna='!DV14'; use all antennas except DV14 scan Scan number range default: '' (all) example: scan='1~5' For multiple MS input, a list of scan strings can be used: scan=['0~100','10~200'] scan='0~100'; scan ids 0-100 for all input MSes observation Observation ID range default: '' (all) example: observation='1~5' intent Scan Intent(s) default: '' (all) example: intent='TARGET_SOURCE' example: intent='TARGET_SOURCE1,TARGET_SOURCE2' example: intent='TARGET_POINTING*' datacolumn Data column to image (data or observed, corrected) default:'corrected' ( If 'corrected' does not exist, it will use 'data' instead ) imagename Pre-name of output images example : imagename='try' Output images will be (a subset of) : try.psf - Point spread function try.residual - Residual image try.image - Restored image try.model - Model image (contains only flux components) try.sumwt - Single pixel image containing sum-of-weights. (for natural weighting, sensitivity=1/sqrt(sumwt)) try.pb - Primary beam model (values depend on the gridder used) Widefield projection algorithms (gridder=mosaic,awproject) will compute the following images too. 
try.weight - FT of gridded weights or the un-normalized sum of PB-square (for all pointings) Here, PB = sqrt(weight) normalized to a maximum of 1.0 For multi-term wideband imaging, all relevant images above will have additional .tt0, .tt1, etc. suffixes to indicate Taylor terms, plus the following extra output images. try.alpha - spectral index try.alpha.error - estimate of error on spectral index try.beta - spectral curvature (if nterms > 2) Tip : Include a directory name in 'imagename' for all output images to be sent there instead of the current working directory : imagename='mydir/try' Tip : Restarting an imaging run without changing 'imagename' implies continuation from the existing model image on disk. - If 'startmodel' was initially specified it needs to be set to "" for the restart run (or tclean will exit with an error message). - By default, the residual image and psf will be recomputed but if no changes were made to relevant parameters between the runs, set calcres=False, calcpsf=False to resume directly from the minor cycle without the (unnecessary) first major cycle. To automatically change 'imagename' with a numerical increment, set restart=False (see tclean docs for 'restart'). Note : All imaging runs will by default produce restored images. For a niter=0 run, this will be redundant and can optionally be turned off via the 'restoration=T/F' parameter. imsize Number of pixels example : imsize = [350,250] imsize = 500 is equivalent to [500,500] To take proper advantage of internal optimized FFT routines, the number of pixels must be even and factorizable by 2,3,5,7 only (a small checker sketch follows the 'stokes' entry below). cell Cell size example: cell=['0.5arcsec','0.5arcsec'] or cell=['1arcmin', '1arcmin'] cell = '1arcsec' is equivalent to ['1arcsec','1arcsec'] phasecenter Phase center of the image (string or field id); if the phasecenter is the name of a known major solar system object ('MERCURY', 'VENUS', 'MARS', 'JUPITER', 'SATURN', 'URANUS', 'NEPTUNE', 'PLUTO', 'SUN', 'MOON') or is an ephemerides table then that source is tracked and the background sources get smeared. There is a special case, when phasecenter='TRACKFIELD', which will use the ephemerides or polynomial phasecenter in the FIELD table of the MS's as the source center to track. example: phasecenter=6 phasecenter='J2000 19h30m00 -40d00m00' phasecenter='J2000 292.5deg -40.0deg' phasecenter='J2000 5.105rad -0.698rad' phasecenter='ICRS 13:05:27.2780 -049.28.04.458' phasecenter='myComet_ephem.tab' phasecenter='MOON' phasecenter='TRACKFIELD' stokes Stokes Planes to make default='I'; example: stokes='IQUV'; Options: 'I','Q','U','V','IV','QU','IQ','UV','IQUV','RR','LL','XX','YY','RRLL','XXYY','pseudoI' Note : Due to current internal code constraints, if any correlation pair is flagged, by default, no data for that row in the MS will be used. So, in an MS with XX,YY, if only YY is flagged, neither a Stokes I image nor an XX image can be made from those data points. In such a situation, please split out only the unflagged correlation into a separate MS. Note : The 'pseudoI' option is a partial solution, allowing Stokes I imaging when either of the parallel-hand correlations are unflagged. The remaining constraints shall be removed (where logical) in a future release. 
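[Editor's illustrative sketch -- not part of the original help text.]
The 'imsize' note above recommends sizes that are even and factorizable
by 2, 3, 5 and 7 only; a tiny standalone checker (no CASA API assumed):

def _good_imsize(n):
    # even, and composed only of the prime factors 2, 3, 5 and 7
    if n % 2:
        return False
    for p in (2, 3, 5, 7):
        while n % p == 0:
            n //= p
    return n == 1

# _good_imsize(500) -> True ; _good_imsize(501) -> False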
projection Coordinate projection Examples : SIN, NCP A list of supported (but untested) projections can be found here : http://casa.nrao.edu/active/docs/doxygen/html/classcasa_1_1Projection.html#a3d5f9ec787e4eabdce57ab5edaf7c0cd startmodel Name of starting model image The contents of the supplied starting model image will be copied to the imagename.model before the run begins. example : startmodel = 'singledish.im' For deconvolver='mtmfs', one image per Taylor term must be provided. example : startmodel = ['try.model.tt0', 'try.model.tt1'] startmodel = ['try.model.tt0'] will use a starting model only for the zeroth order term. startmodel = ['','try.model.tt1'] will use a starting model only for the first order term. This starting model can be of a different image shape and size from what is currently being imaged. If so, an image regrid is first triggered to resample the input image onto the target coordinate system. A common usage is to set this parameter equal to a single dish image. Negative components in the model image will be included as is. [ Note : If an error occurs during image resampling/regridding, please try using task imregrid to resample the starting model image onto a CASA image with the target shape and coordinate system before supplying it via startmodel ] specmode Spectral definition mode (mfs,cube,cubedata, cubesource) mode='mfs' : Continuum imaging with only one output image channel. (mode='cont' can also be used here) mode='cube' : Spectral line imaging with one or more channels Parameters start, width, and nchan define the spectral coordinate system and can be specified either in terms of channel numbers, frequency or velocity in whatever spectral frame is specified in 'outframe'. All internal and output images are made with outframe as the base spectral frame. However imaging code internally uses the fixed spectral frame, LSRK, for automatic internal software Doppler tracking so that a spectral line observed over an extended time range will line up appropriately. Therefore the output images have an additional spectral frame conversion layer in LSRK on top of the base frame. (Note : Even if the input parameters are specified in a frame other than LSRK, the viewer still displays spectral axis in LSRK by default because of the conversion frame layer mentioned above. The viewer can be used to relabel the spectral axis in any desired frame - via the spectral reference option under axis label properties in the data display options window.) mode='cubedata' : Spectral line imaging with one or more channels There is no internal software Doppler tracking so a spectral line observed over an extended time range may be smeared out in frequency. There is strictly no valid spectral frame with which to label the output images, but they will list the frame defined in the MS. mode='cubesource': Spectral line imaging while tracking moving source (near field or solar system objects). The velocity of the source is accounted for and the frequency reported is in the source frame. As there is no SOURCE frame defined, the frame reported will be REST (note it may not be the rest frame, as the emission region may be moving w.r.t. the systemic velocity frame) reffreq Reference frequency of the output image coordinate system Example : reffreq='1.5GHz' as a string with units. By default, it is calculated as the middle of the selected frequency range. For deconvolver='mtmfs' the Taylor expansion is also done about this specified reference frequency. 
nchan Number of channels in the output image For default (=-1), the number of channels will be automatically determined based on data selected by 'spw' with 'start' and 'width'. It is often easiest to leave nchan at the default value. example: nchan=100 start First channel (e.g. start=3, start='1.1GHz', start='15343km/s') of output cube images specified by data channel number (integer), velocity (string with a unit), or frequency (string with a unit). Default:''; The first channel is automatically determined based on the 'spw' channel selection and 'width'. When the channel number is used along with the channel selection in 'spw' (e.g. spw='0:6~100'), 'start' channel number is RELATIVE (zero-based) to the selected channels in 'spw'. So for the above example, start=1 means that the first image channel is the second selected data channel, which is channel 7. For specmode='cube', when velocity or frequency is used it is interpreted with the frame defined in outframe. [The parameters of the desired output cube can be estimated by using the 'transform' functionality of 'plotms'] examples: start='5.0km/s'; 1st channel, 5.0km/s in outframe start='22.3GHz'; 1st channel, 22.3GHz in outframe width Channel width (e.g. width=2, width='0.1MHz', width='10km/s') of output cube images specified by data channel number (integer), velocity (string with a unit), or frequency (string with a unit). Default:''; data channel width The sign of width defines the direction of the channels to be incremented. Specifying width in velocity or frequency with '-' in front gives image channels in decreasing velocity or frequency, respectively. For specmode='cube', when velocity or frequency is used it is interpreted with the reference frame defined in outframe. examples: width='2.0km/s'; results in channels with increasing velocity width='-2.0km/s'; results in channels with decreasing velocity width='40kHz'; results in channels with increasing frequency width=-2; results in channels averaged of 2 data channels incremented from high to low channel numbers outframe Spectral reference frame in which to interpret 'start' and 'width' Options: '','LSRK','LSRD','BARY','GEO','TOPO','GALACTO','LGROUP','CMB' example: outframe='bary' for Barycentric frame REST -- Rest frequency LSRD -- Local Standard of Rest (J2000) -- as the dynamical definition (IAU, [9,12,7] km/s in galactic coordinates) LSRK -- LSR as a kinematical (radio) definition -- 20.0 km/s in direction ra,dec = [270,+30] deg (B1900.0) BARY -- Barycentric (J2000) GEO --- Geocentric TOPO -- Topocentric GALACTO -- Galacto centric (with rotation of 220 km/s in direction l,b = [90,0] deg) LGROUP -- Local group velocity -- 308km/s towards l,b = [105,-7] deg (F. Ghigo) CMB -- CMB velocity -- 369.5km/s towards l,b = [264.4, 48.4] deg (F. Ghigo) DEFAULT = LSRK veltype Velocity type (radio, z, ratio, beta, gamma, optical) For start and/or width specified in velocity, specifies the velocity definition Options: 'radio','optical','z','ratio','beta','gamma' NOTE: the viewer always defaults to displaying the 'radio' frame, but that can be changed in the position tracking pull down. The different types (with F = f/f0, the frequency ratio), are: Z = (-1 + 1/F) RATIO = (F) * RADIO = (1 - F) OPTICAL == Z BETA = ((1 - F^2)/(1 + F^2)) GAMMA = ((1 + F^2)/2F) * RELATIVISTIC == BETA (== v/c) DEFAULT == RADIO Note that the ones with an '*' have no real interpretation (although the calculation will proceed) if given as a velocity. 
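[Editor's illustrative sketch -- not part of the original help text.]
The veltype definitions above, written out for a frequency ratio
F = f/f0, with velocities in units of c:

F = 0.9
radio = 1 - F                       # RADIO definition
z = -1 + 1 / F                      # Z definition (== OPTICAL)
beta = (1 - F**2) / (1 + F**2)      # BETA (relativistic, v/c)
gamma = (1 + F**2) / (2 * F)        # GAMMA
ratio = F                           # RATIO (no velocity interpretation)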
restfreq List of rest frequencies or a rest frequency in a string. Specify rest frequency to use for output image. *Currently it uses the first rest frequency in the list for translation of velocities. The list will be stored in the output images. Default: []; look for the rest frequency stored in the MS, if not available, use center frequency of the selected channels examples: restfreq=['1.42GHz'] restfreq='1.42GHz' interpolation Spectral interpolation (nearest,linear,cubic) Interpolation rules to use when binning data channels onto image channels and evaluating visibility values at the centers of image channels. Note : 'linear' and 'cubic' interpolation requires data points on both sides of each image frequency. Errors are therefore possible at edge channels, or near flagged data channels. When image channel width is much larger than the data channel width there is nothing much to be gained using linear or cubic thus not worth the extra computation involved. perchanweightdensity When calculating weight density for Briggs style weighting in a cube, this parameter determines whether to calculate the weight density for each channel independently (the default, True) or a common weight density for all of the selected data. This parameter has no meaning for continuum (specmode='mfs') imaging or for natural and radial weighting schemes. For cube imaging perchanweightdensity=True is a recommended option that provides more uniform sensitivity per channel for cubes, but with generally larger psfs than the perchanweightdensity=False (prior behavior) option. When using Briggs style weight with perchanweightdensity=True, the imaging weight density calculations use only the weights of data that contribute specifically to that channel. On the other hand, when perchanweightdensity=False, the imaging weight density calculations sum all of the weights from all of the data channels selected whose (u,v) falls in a given uv cell on the weight density grid. Since the aggregated weights, in any given uv cell, will change depending on the number of channels included when imaging, the psf calculated for a given frequency channel will also necessarily change, resulting in variability in the psf for a given frequency channel when perchanweightdensity=False. In general, perchanweightdensity=False results in smaller psfs for the same value of robustness compared to perchanweightdensity=True, but the rms noise as a function of channel varies and increases toward the edge channels; perchanweightdensity=True provides more uniform sensitivity per channel for cubes. This may make it harder to find estimates of continuum when perchanweightdensity=False. If you intend to image a large cube in many smaller subcubes and subsequently concatenate, it is advisable to use perchanweightdensity=True to avoid surprisingly varying sensitivity and psfs across the concatenated cube. gridder Gridding options (standard, wproject, widefield, mosaic, awproject) The following options choose different gridding convolution functions for the process of convolutional resampling of the measured visibilities onto a regular uv-grid prior to an inverse FFT. Model prediction (degridding) also uses these same functions. Several wide-field effects can be accounted for via careful choices of convolution functions. Gridding (degridding) runtime will rise in proportion to the support size of these convolution functions (in uv-pixels). 
standard : Prolate spheroid with 7x7 uv pixel support size.
[ This mode can also be invoked using 'ft' or 'gridft'. ]

wproject : W-Projection algorithm to correct for the widefield non-coplanar baseline effect [Cornwell et al., 2008]. wprojplanes is the number of distinct w-values at which to compute and use different gridding convolution functions (see the help for wprojplanes). The convolution function support size can range from 5x5 to a few hundred on a side.
[ This mode can also be invoked using 'wprojectft'. ]

widefield : Faceted imaging with or without W-Projection per facet. A set of facets x facets subregions of the specified image are gridded separately using their respective phase centers (to minimize the maximum w). Deconvolution is done on the joint full size image, using a PSF from the first subregion.
wprojplanes=1 : standard prolate spheroid gridder per facet.
wprojplanes > 1 : W-Projection gridder per facet.
nfacets=1, wprojplanes > 1 : pure W-Projection and no facetting.
nfacets=1, wprojplanes=1 : same as standard, ft, gridft.
A combination of facetting and W-Projection is relevant only for very large fields of view. (In our current version of tclean, this combination runs only with parallel=False.)

mosaic : A-Projection with azimuthally symmetric beams, without sidelobes, beam rotation or squint correction. Gridding convolution functions per visibility are computed from FTs of PB models per antenna. This gridder can be run on single fields as well as mosaics.
VLA : PB polynomial fit model (Napier and Rots, 1982)
EVLA : PB polynomial fit model (Perley, 2015)
ALMA : Airy disks for a 10.7m dish (for 12m dishes) and a 6.25m dish (for 7m dishes), each with 0.75m blockages (Hunter/Brogan 2011). Joint mosaic imaging supports heterogeneous arrays for ALMA.
Typical gridding convolution function support sizes are between 7 and 50, depending on the desired accuracy (given by the uv cell size or image field of view).
[ This mode can also be invoked using 'mosaicft' or 'ftmosaic'. ]

awproject : A-Projection with azimuthally asymmetric beams, including beam rotation, squint correction, conjugate frequency beams and W-Projection [Bhatnagar et al., 2008]. Gridding convolution functions are computed from aperture illumination models per antenna and optionally combined with W-Projection kernels and a prolate spheroid. This gridder can be run on single fields as well as mosaics.
VLA : Uses a ray-traced model (VLA and EVLA) including feed leg and subreflector shadows, off-axis feed location (for beam squint and other polarization effects), and a Gaussian fit for the feed beams (Ref: Brisken 2009).
ALMA : Similar ray-traced model as above (but the correctness of its polarization properties remains unverified).
Typical gridding convolution function support sizes are between 7 and 50, depending on the desired accuracy (given by the uv cell size or image field of view). When combined with W-Projection they can be significantly larger.
[ This mode can also be invoked using 'awprojectft'. ]

imagemosaic : (untested implementation) Grid and iFT each pointing separately and combine the images as a linear mosaic (weighted by a PB model) in the image domain before a joint minor cycle.
VLA/ALMA PB models are the same as for gridder='mosaicft'.

------ Notes on PB models :

(1) Several different sources of PB models are used in the modes listed above. This is partly for reasons of algorithmic flexibility and partly due to the current lack of a common beam model repository or consensus on what beam models are most appropriate.
(2) For ALMA and gridder='mosaic', ray-traced (TICRA) beams are also available via the vpmanager tool. For example, call the following before the tclean run:
vp.setpbimage(telescope="ALMA", compleximage='/home/casa/data/trunk/alma/responses/ALMA_0_DV__0_0_360_0_45_90_348.5_373_373_GHz_ticra2007_VP.im', antnames=['DV'+'%02d'%k for k in range(25)])
vp.saveastable('mypb.tab')
Then, supply vptable='mypb.tab' to tclean. ( Currently this will work only for non-parallel runs. )

------ Note on PB masks :

In tclean, A-Projection gridders (mosaic and awproject) produce a .pb image and use the 'pblimit' subparameter to decide normalization cutoffs and construct an internal T/F mask in the .pb and .image images. However, this T/F mask cannot directly be used during deconvolution (which needs a 1/0 mask). There are two options for making a pb based deconvolution mask (a scripted version of the first recipe is sketched at the end of this gridder section):
-- Run tclean with niter=0 to produce the .pb, construct a 1/0 image with the desired threshold (using ia.open('newmask.im'); ia.calc('iif("xxx.pb">0.3,1.0,0.0)'); ia.close() for example), and supply it via the 'mask' parameter in a subsequent run (with calcres=F and calcpsf=F to restart directly from the minor cycle).
-- Run tclean with usemask='pb' for it to automatically construct a 1/0 mask from the internal T/F mask from the .pb, at a fixed 0.2 threshold.

----- Making PBs for gridders other than mosaic, awproject :

After the PSF generation, a PB is constructed using the same models used in gridder='mosaic', but just evaluated in the image domain without consideration to weights.
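The two-step pb-mask recipe above can be scripted as follows. This is a minimal sketch: the MS name, the image prefix 'img', and the 0.3 cutoff are placeholders, and it assumes a CASA session where the tclean task and the ia tool are available.

    # Step 1 : initial major cycle only, to produce img.pb (and img.psf etc.)
    tclean(vis='my_data.ms', imagename='img', gridder='mosaic', niter=0)

    # Step 2 : make a 1/0 mask image at the 0.3 pb gain level, following the
    # ia.calc recipe quoted above (start from a copy of the .pb so that the
    # mask image has the correct shape and coordinates)
    ia.fromimage(outfile='newmask.im', infile='img.pb', overwrite=True)
    ia.close()
    ia.open('newmask.im')
    ia.calc('iif("img.pb">0.3,1.0,0.0)')
    ia.close()

    # Step 3 : restart directly from the minor cycle, using the new mask
    tclean(vis='my_data.ms', imagename='img', gridder='mosaic',
           niter=1000, mask='newmask.im', calcres=False, calcpsf=False)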
facets
Number of facets on a side.
A set of (facets x facets) subregions of the specified image are gridded separately using their respective phase centers (to minimize the maximum w). Deconvolution is done on the joint full size image, using a PSF from the first subregion/facet. In our current version of tclean, facets>1 may be used only with parallel=False.

psfphasecenter
For mosaics, use a psf centered on this optional direction. You may need to use this if, for example, the mosaic does not have any pointing in the center of the image. Another reason to use it is that the psf is approximate for a mosaic, so centering the psf on a bright non-central source may help deconvolve it well and quickly.
example: psfphasecenter=6  # center psf on field 6
         psfphasecenter='J2000 19h30m00 -40d00m00'
         psfphasecenter='J2000 292.5deg -40.0deg'
         psfphasecenter='J2000 5.105rad -0.698rad'
         psfphasecenter='ICRS 13:05:27.2780 -049.28.04.458'

wprojplanes
Number of distinct w-values at which to compute and use different gridding convolution functions for W-Projection.
An appropriate value of wprojplanes depends on the presence/absence of a bright source far from the phase center, the desired dynamic range of an image in the presence of a bright far out source, the maximum w-value in the measurements, and the desired trade off between accuracy and computing cost.
As a (rough) guide, VLA L-Band D-config may require a value of 128 for a source 30 arcmin away from the phase center. A-config may require 1024 or more. To converge to an appropriate value, try starting with 128 and then increasing it if artifacts persist. W-term artifacts (for the VLA) typically look like arc-shaped smears in a synthesis image or a shift in source position between images made at different times. These artifacts are more pronounced the further the source is from the phase center.
There is no harm in simply always choosing a large value (say, 1024), but there will be a significant performance cost to doing so, especially for gridder='awproject', where it is combined with A-Projection.
wprojplanes=-1 is an option for gridder='widefield' or 'wproject' in which the number of planes is automatically computed.

vptable
Name of a voltage pattern (VP) table, as created with the vpmanager tool.
vptable='' : Choose default beams for different telescopes.
ALMA : Airy disks
EVLA : old VLA models
Other primary beam models can be chosen via the vpmanager tool.
Step 1 : Set up the vpmanager tool and save its state in a table:
vp.setpbpoly(telescope='EVLA', coeff=[1.0, -1.529e-3, 8.69e-7, -1.88e-10])
vp.saveastable('myvp.tab')
Step 2 : Supply the name of that table in tclean:
tclean(....., vptable='myvp.tab', ....)
Please see the documentation for the vpmanager for more details on how to choose different beam models. Work is in progress to update the defaults for EVLA and ALMA.
Note : AWProjection currently does not use this mechanism to choose beam models. It instead uses ray-traced beams computed from parameterized aperture illumination functions, which are not available via the vpmanager. So, gridder='awproject' does not allow the user to set this parameter.

mosweight
When doing Briggs style weighting (including uniform), perform the weight density calculation for each field independently if True. If False, the weight density is calculated from the average uv distribution of all the fields.

aterm
Use aperture illumination functions during gridding.
This parameter turns on the A-term of the AW-Projection gridder. Gridding convolution functions are constructed from aperture illumination function models of each antenna.

psterm
Include the Prolate Spheroidal (PS) function as the anti-aliasing operator in the gridding convolution functions used for gridding.
Setting this parameter to True is necessary when aterm is set to False. It can be set to False when aterm is set to True, though with this setting effects of aliasing may appear in the image, particularly near the edges.
When set to True, the .pb images will contain the Fourier transform of the PS function. The table below enumerates the functional effects of the psterm, aterm and wprojplanes settings. PB refers to the Primary Beam, and FT() refers to the Fourier transform operation.

Operation       aterm   psterm  wprojplanes   Contents of the .pb image
-----------------------------------------------------------------------
AW-Projection   True    True    >1            FT(PS) x PB
                        False                 PB
A-Projection    True    True    1             FT(PS) x PB
                        False                 PB
W-Projection    False   True    >1            FT(PS)
Standard        False   True    1             FT(PS)

wbawp
Use frequency dependent A-terms.
Scale aperture illumination functions appropriately with frequency when gridding and combining data from multiple channels.

conjbeams
Use the conjugate frequency for wideband A-terms.
While gridding data from one frequency channel, choose a convolution function from a 'conjugate' frequency such that the resulting baseline primary beam is approximately constant across frequency. For a system in which the primary beam scales with frequency, this step will eliminate instrumental spectral structure from the measured data and leave only the sky spectrum for the minor cycle to model and reconstruct [Bhatnagar et al., ApJ, 2013].
As a rough guideline for when this is relevant: a source at the half power point of the PB at the center frequency will see an artificial spectral index of -1.4 due to the frequency dependence of the PB [Sault and Wieringa, 1994].
If left uncorrected during gridding, this spectral structure must be modeled in the minor cycle (using the mtmfs algorithm) to avoid dynamic range limits (of a few hundred for a 2:1 bandwidth).
This works for specmode='mfs'; its value is ignored for cubes.

cfcache
Convolution function cache directory name.
Name of a directory in which to store gridding convolution functions. This cache is filled at the beginning of an imaging run. This step can be time consuming, but the cache can be reused across multiple imaging runs that use the same image parameters (cell size, image size, spectral data selections, wprojplanes, wbawp, psterm, aterm). The effect of the wbawp, psterm and aterm settings is frozen into the cfcache; using an existing cfcache made with a different setting of these parameters will not reflect the current settings.
In a parallel execution, the construction of the cfcache is also parallelized, and the time to compute scales close to linearly with the number of compute cores used. With the re-computation of Convolution Functions (CFs) due to PA rotation turned off (the computepastep parameter), the total number of CFs in the cfcache can be computed as [No. of wprojplanes x No. of selected spectral windows x 4].
By default, cfcache = imagename + '.cf'

usepointing
The usepointing flag informs the gridder that it should utilize the pointing table to use the correct direction in which the antenna is pointing with respect to the pointing phasecenter.

computepastep
Parallactic angle interval after which the AIFs are recomputed (deg).
This parameter controls the accuracy of the aperture illumination function (AIF) used with A-Projection for alt-az mount dishes, where the AIF rotates on the sky as the synthesis image is built up. Once the PA in the data changes by the given interval, AIFs are re-computed at the new PA.
A value of 360.0 deg (the default) implies no re-computation due to PA rotation. AIFs are computed for the PA value of the first valid data received and used for all of the data.

rotatepastep
Parallactic angle interval after which the nearest AIF is rotated (deg).
Instead of recomputing the AIF for every timestep's parallactic angle, the nearest existing AIF is used and rotated once the PA has changed by the rotatepastep value.
A value of 360.0 deg (the default) disables rotation of the AIF.
For example, computepastep=360.0 and rotatepastep=5.0 will compute the AIFs at only the starting parallactic angle, and all other timesteps will use a rotated version of that AIF at the nearest 5.0 degree point.

pointingoffsetsigdev
Corrections for heterogeneous and time-dependent pointing offsets via AWProjection are controlled by this parameter. It is a vector of 2 ints or doubles, each of which is interpreted in units of arcsec. Based on the first threshold, a clustering algorithm is applied to entries from the POINTING subtable of the MS to determine the distinct antenna groups for which the pointing offset must be computed separately. The second number controls how large a pointing change across time can be ignored, beyond which an antenna re-binning is required.
Note : The default value of this parameter is [], due to a programmatic constraint. If run with this value, it will internally pick [600,600] and exercise the option of using large tolerances (10 arcmin) on both axes. Please choose a setting explicitly for runs that need to use this parameter.
Note : This option is available only for gridder='awproject' and usepointing=True, and has been validated primarily with VLASS on-the-fly mosaic data where POINTING subtables have been modified after the data are recorded.

Examples of parameter usage :
[100.0,100.0] : Pointing offsets of 100 arcsec or less are considered small enough to be ignored. Using large values for both indicates a homogeneous array.
[10.0, 100.0] : Based on entries in the POINTING subtable, antennas are grouped into clusters based on a 10 arcsec bin size. All antennas in a bin are given a pointing offset calculated as the average of the offsets of all antennas in the bin. On the time axis, offset changes up to 100 arcsec will be ignored.
[10.0,10.0] : Calculate separate pointing offsets for each antenna group (with a 10 arcsec bin size). As a function of time, recalculate the antenna binning if the POINTING table entries change by more than 10 arcsec w.r.t. the previously computed binning.
[1.0, 1.0] : Tight tolerances will imply a fully heterogeneous situation where each antenna gets its own pointing offset. Also, time-dependent offset changes greater than 1 arcsec will trigger recomputes of the phase gradients. This is the most general situation and is also the most expensive option, as it constructs and uses separate phase gradients for all baselines and timesteps.
For VLASS 1.1 data with two kinds of pointing offsets, the recommended setting is [30.0, 30.0]. For VLASS 1.2 data with only the time-dependent pointing offsets, the recommended setting is [300.0, 30.0], to turn off the antenna grouping but retain the time dependent corrections required from one timestep to the next.

pblimit
PB gain level at which to cut off normalizations.
Divisions by the .pb during normalizations have a cut off at a .pb gain level given by pblimit. Outside this limit, image values are set to zero. Additionally, by default, an internal T/F mask is applied to the .pb, .image and .residual images to mask out (T) all invalid pixels outside the pblimit area.
Note : This internal T/F mask cannot be used as a deconvolution mask. To do so, please follow the steps listed above in the Notes for the 'gridder' parameter.
Note : To prevent the internal T/F mask from appearing in anything other than the .pb and .image.pbcor images, 'pblimit' can be set to a negative number. The absolute value will still be used as a valid 'pblimit'. A tclean restart using existing output images on disk (that already have this T/F mask in the .residual and .image) with pblimit set to a negative value will remove this mask after the next major cycle.

normtype
Normalization type (flatnoise, flatsky, pbsquare).
Gridded (and FT'd) images represent the PB-weighted sky image. Qualitatively it can be approximated as two instances of the PB applied to the sky image (one naturally present in the data and one introduced during gridding via the convolution functions).
xxx.weight : Weight image, approximately equal to sum ( square ( pb ) )
xxx.pb : Primary beam, calculated as sqrt ( xxx.weight )
normtype='flatnoise' : Divide the raw image by sqrt(.weight) so that the input to the minor cycle represents the product of the sky and PB. The noise is 'flat' across the region covered by each PB.
normtype='flatsky' : Divide the raw image by .weight so that the input to the minor cycle represents only the sky. The noise is higher in the outer regions of the primary beam where the sensitivity is low.
normtype='pbsquare' : No normalization after gridding and FFT.
The minor cycle sees the sky multiplied by the square of the PB.

deconvolver
Name of the minor cycle algorithm (hogbom, clark, multiscale, mtmfs, mem, clarkstokes).
Each of the following algorithms operates on residual images and psfs from the gridder and produces output model and restored images. Minor cycles stop and a major cycle is triggered when cyclethreshold or cycleniter is reached. For all methods, components are picked from the entire extent of the image or (if specified) within a mask.

hogbom : An adapted version of Hogbom Clean [Hogbom, 1974]
- Find the location of the peak residual
- Add this delta function component to the model image
- Subtract a scaled and shifted PSF of the same size as the image from regions of the residual image where the two overlap
- Repeat

clark : An adapted version of Clark Clean [Clark, 1980]
- Find the location of max(I^2+Q^2+U^2+V^2)
- Add delta functions to each stokes plane of the model image
- Subtract a scaled and shifted PSF within a small patch size from regions of the residual image where the two overlap
- After several iterations, trigger a Clark major cycle to subtract components from the visibility domain, but without de-gridding
- Repeat
( Note : 'clark' maps to imagermode='' in the old clean task. 'clark_exp' is another implementation that maps to imagermode='mosaic' or 'csclean' in the old clean task, but the behavior is not identical. For now, please use deconvolver='hogbom' if you encounter problems. )

clarkstokes : Clark Clean operating separately per Stokes plane
( Note : 'clarkstokes_exp' is an alternate version. See above. )

multiscale : MultiScale Clean [Cornwell, 2008]
- Smooth the residual image to multiple scale sizes
- Find the location and scale at which the peak occurs
- Add this multiscale component to the model image
- Subtract a scaled, smoothed, shifted PSF (within a small patch size per scale) from all residual images
- Repeat from step 2

mtmfs : Multi-term (Multi Scale) Multi-Frequency Synthesis [Rau and Cornwell, 2011]
- Smooth each Taylor residual image to multiple scale sizes
- Solve a NTxNT system of equations per scale size to compute Taylor coefficients for components at all locations
- Compute the gradient chi-square and pick the Taylor coefficients and scale size at the location with the maximum reduction in chi-square
- Add multi-scale components to each Taylor-coefficient model image
- Subtract scaled, smoothed, shifted PSFs (within a small patch size per scale) from all smoothed Taylor residual images
- Repeat from step 2

mem : Maximum Entropy Method [Cornwell and Evans, 1985]
- Iteratively solve for values at all individual pixels via the MEM method. It minimizes an objective function of chi-square plus entropy (here, a measure of difference between the current model and a flat prior model).
( Note : This MEM implementation is not very robust. Improvements will be made in the future. )

scales
List of scale sizes (in pixels) for the multi-scale and mtmfs algorithms. --> scales=[0,6,20]
This set of scale sizes should represent the sizes (diameters in units of number of pixels) of dominant features in the image being reconstructed. The smallest scale size is recommended to be 0 (point source), the second the size of the synthesized beam, and the third 3-5 times the synthesized beam, etc. For example, if the synthesized beam is 10" FWHM and cell=2", try scales = [0,5,15].
For numerical stability, the largest scale must be smaller than the image (or mask) size and smaller than or comparable to the scale corresponding to the lowest measured spatial frequency (as a scale size much larger than what the instrument is sensitive to is unconstrained by the data, making it harder to recover from errors during the minor cycle).
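The beam-based rule of thumb above can be captured in a small helper. This is an illustrative sketch only; the function name and rounding choices are ours, not part of tclean:

    def suggest_scales(beam_fwhm_arcsec, cell_arcsec):
        """Suggest multi-scale clean scale sizes (pixels) from beam and cell size."""
        beam_pix = beam_fwhm_arcsec / cell_arcsec    # synthesized beam FWHM in pixels
        # point source, ~1 beam, and ~3 beams, per the guideline above
        return [0, int(round(beam_pix)), int(round(3 * beam_pix))]

    # a 10 arcsec beam with 2 arcsec cells -> [0, 5, 15], matching the example above
    print(suggest_scales(10.0, 2.0))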
nterms
Number of Taylor coefficients in the spectral model.
- nterms=1 : Assume a flat spectrum source
- nterms=2 : Spectrum is a straight line with a slope
- nterms=N : A polynomial of order N-1
From a Taylor expansion of the expression of a power law, the spectral index is derived as alpha = taylorcoeff_1 / taylorcoeff_0. Spectral curvature is similarly derived when possible.
The optimal number of Taylor terms depends on the available signal to noise ratio, bandwidth ratio, and spectral shape of the source as seen by the telescope (sky spectrum x PB spectrum).
nterms=2 is a good starting point for wideband EVLA imaging and the lower frequency bands of ALMA (when the fractional bandwidth is greater than 10%), and if there is at least one bright source for which a dynamic range of greater than a few hundred is desired.
Spectral artifacts for the VLA often look like spokes radiating out from a bright source (i.e. in the image made with standard mfs imaging). If increasing the number of terms does not eliminate these artifacts, check the data for inadequate bandpass calibration. If the source is away from the pointing center, consider including wide-field corrections too.
( Note : In addition to the output Taylor coefficient images .tt0, .tt1, etc., images of spectral index (.alpha), an estimate of error on spectral index (.alpha.error), and spectral curvature (.beta, if nterms is greater than 2) are produced.
- These alpha, alpha.error and beta images contain internal T/F masks based on a threshold computed as peakresidual/10. Additional masking based on .alpha/.alpha.error may be desirable.
- .alpha.error is a purely empirical estimate derived from the propagation of error during the division of two noisy numbers (alpha = xx.tt1/xx.tt0), where the 'error' on tt1 and tt0 are simply the values picked from the corresponding residual images. The absolute value of the error is not always accurate, and it is best to interpret the errors across the image only in a relative sense. )

smallscalebias
A numerical control to bias the scales when using the multi-scale or mtmfs algorithms.
The peak from each scale's smoothed residual is multiplied by ( 1 - smallscalebias * scale/maxscale ) to increase or decrease the amplitude relative to other scales, before the scale with the largest peak is chosen.
smallscalebias can be varied between -1.0 and 1.0. A value of 0.0 gives all scales equal weight (default). A value larger than 0.0 will bias the solution towards smaller scales. A value smaller than 0.0 will bias the solution towards larger scales. The effect of smallscalebias is more pronounced when using multi-scale relative to mtmfs.

restoration
Restore the model image.
Construct a restored image : imagename.image, by convolving the model image with a clean beam and adding the residual image to the result. If a restoringbeam is specified, the residual image is also smoothed to that target resolution before adding it in.
If a .model does not exist, it will make an empty one and create the restored image from the residuals (with additional smoothing if needed). With algorithm='mtmfs', this will construct Taylor coefficient maps from the residuals and compute .alpha and .alpha.error.
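As a concrete sketch of the wideband workflow that nterms and restoration describe (the MS name, image name, reference frequency and thresholds below are placeholders, not values from this document):

    # mtmfs with nterms=2 fits flux (tt0) and slope (tt1) per component;
    # restoration=True also writes wb_img.alpha and wb_img.alpha.error
    tclean(vis='my_data.ms', imagename='wb_img',
           specmode='mfs', deconvolver='mtmfs', nterms=2,
           reffreq='3.0GHz',            # reference frequency of the Taylor expansion
           niter=1000, threshold='0.1mJy',
           restoration=True)

    # the spectral index image (alpha = tt1/tt0) is then wb_img.alpha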
restoringbeam
Restoring beam shape/size to use.
- restoringbeam='' or [''] : A Gaussian fitted to the PSF main lobe (separately per image plane).
- restoringbeam='10.0arcsec' : Use a circular Gaussian of this width for all planes.
- restoringbeam=['8.0arcsec','10.0arcsec','45deg'] : Use this elliptical Gaussian for all planes.
- restoringbeam='common' : Automatically estimate a common beam shape/size appropriate for all planes.
Note : For any restoring beam different from the native resolution, the model image is convolved with the beam and added to residuals that have been convolved to the same target resolution.

pbcor
Apply PB correction to the output restored image.
A new image with extension .image.pbcor will be created from the evaluation of .image / .pb for all pixels above the specified pblimit.
Note : Stand-alone PB-correction can be triggered by re-running tclean with the appropriate imagename and with niter=0, calcpsf=False, calcres=False, pbcor=True, vptable='vp.tab' (where vp.tab is the name of the vpmanager file; see the inline help for the 'vptable' parameter).
Note : Multi-term PB correction that includes a correction for the spectral index of the PB has not been enabled for the 4.7 release. Please use the widebandpbcor task instead. ( Wideband PB corrections are required when the amplitude of the brightest source is known accurately enough to be sensitive to the difference in the PB gain between the upper and lower end of the band at its location. As a guideline, the artificial spectral index due to the PB is -1.4 at the 0.5 gain level and less than -0.2 at the 0.9 gain level at the middle frequency. )

outlierfile
Name of the outlier-field image definitions file.
A text file containing sets of parameter=value pairs, one set per outlier field.
Example : outlierfile='outs.txt'
Contents of outs.txt :

imagename=tst1
nchan=1
imsize=[80,80]
cell=[8.0arcsec,8.0arcsec]
phasecenter=J2000 19:58:40.895 +40.55.58.543
mask=circle[[40pix,40pix],10pix]

imagename=tst2
nchan=1
imsize=[100,100]
cell=[8.0arcsec,8.0arcsec]
phasecenter=J2000 19:58:40.895 +40.56.00.000
mask=circle[[60pix,60pix],20pix]

The following parameters are currently allowed to be different between the main field and the outlier fields (i.e. they will be recognized if found in the outlier text file). If a parameter is not listed, the value is picked from what is defined in the main task input.
imagename, imsize, cell, phasecenter, startmodel, mask, specmode, nchan, start, width, nterms, reffreq, gridder, deconvolver, wprojplanes
Note : 'specmode' is an option, so combinations of mfs and cube for different image fields, for example, are supported. 'deconvolver' and 'gridder' are also options that allow a different imaging or deconvolution algorithm per image field. For example: multiscale with wprojection and 16 w-term planes on the main field, and mtmfs with nterms=3 and wprojection with 64 planes on a bright outlier source for which the frequency dependence of the primary beam produces a strong effect that must be modeled. The traditional alternative to this approach is to first image the outlier, subtract it out of the data (uvsub), and then image the main field.
Note : If you encounter a use-case where some other parameter needs to be allowed in the outlier file (and it is logical to do so), please send us feedback. The above is an initial list.
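The outlier-field setup above can also be generated from a script. A minimal sketch, reusing the first sample field from the example above (the MS name, main-field image name, and main-field imaging values are placeholders):

    # write an outlier definition file, then image main + outlier fields together
    outlier_text = (
        "imagename=tst1\n"
        "nchan=1\n"
        "imsize=[80,80]\n"
        "cell=[8.0arcsec,8.0arcsec]\n"
        "phasecenter=J2000 19:58:40.895 +40.55.58.543\n"
        "mask=circle[[40pix,40pix],10pix]\n"
    )
    with open('outs.txt', 'w') as f:
        f.write(outlier_text)

    tclean(vis='my_data.ms', imagename='main_field',
           imsize=512, cell='8.0arcsec', niter=500,
           outlierfile='outs.txt')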
weighting
Weighting scheme (natural, uniform, briggs, superuniform, radial, briggsabs, briggsbwtaper).
During gridding of the dirty or residual image, each visibility value is multiplied by a weight before it is accumulated on the uv-grid. The PSF's uv-grid is generated by gridding only the weights (weightgrid).

weighting='natural' : Gridding weights are identical to the data weights from the MS. For visibilities with similar data weights, the weightgrid will follow the sample density pattern on the uv-plane. This weighting scheme provides the maximum imaging sensitivity at the expense of a possibly fat PSF with high sidelobes. It is most appropriate for detection experiments where sensitivity is most important.

weighting='uniform' : Gridding weights per visibility data point are the original data weights divided by the total weight of all data points that map to the same uv grid cell : 'data_weight / total_wt_per_cell'. The weightgrid is as close to flat as possible, resulting in a PSF with a narrow main lobe and suppressed sidelobes. However, since heavily sampled areas of the uv-plane get down-weighted, the imaging sensitivity is not as high as with natural weighting. It is most appropriate for imaging experiments where a well behaved PSF can help the reconstruction.

weighting='briggs' : Gridding weights per visibility data point are given by 'data_weight / ( A * total_wt_per_cell + B )', where A and B vary according to the 'robust' parameter.
robust = -2.0 maps to A=1, B=0, or uniform weighting.
robust = +2.0 maps to natural weighting.
(robust=0.5 is equivalent to robust=0.0 in AIPS IMAGR.)
Robust/Briggs weighting generates a PSF that can vary smoothly between 'natural' and 'uniform' and allows customized trade-offs between PSF shape and imaging sensitivity.

weighting='briggsabs' : Experimental option. Same as Briggs except the formula is different: A = robust*robust, and B depends on the noise estimated per visibility. Giving noise='0Jy' is not a reasonable option. In this mode (or formula), only robust values from -2.0 to 0.0 make sense (2.0 and -2.0 will get the same weighting).

weighting='superuniform' : This is similar to uniform weighting except that the total_wt_per_cell is replaced by the total_wt_within_NxN_cells around the uv cell of interest (N = subparameter 'npixels'). This method tends to give a PSF with inner sidelobes that are suppressed as in uniform weighting, but with far-out sidelobes closer to natural weighting. The peak sensitivity is also closer to natural weighting.

weighting='radial' : Gridding weights are given by 'data_weight * uvdistance'. This method approximately minimizes rms sidelobes for an east-west synthesis array.

weighting='briggsbwtaper' : A modified version of Briggs weighting for cubes where an inverse uv taper, which is proportional to the fractional bandwidth of the entire cube, is applied per channel. The objective is to modify cube (perchanweightdensity=True) imaging weights to have a similar density to that of the continuum imaging weights. This is currently an experimental weighting scheme being developed for ALMA.

For more details on weighting please see Chapter 3 of Dan Briggs' thesis (http://www.aoc.nrao.edu/dissertations/dbriggs).

robust
Robustness parameter for Briggs weighting.
robust = -2.0 maps to uniform weighting.
robust = +2.0 maps to natural weighting.
(robust=0.5 is equivalent to robust=0.0 in AIPS IMAGR.)

noise
Noise parameter for the briggsabs mode of weighting.

npixels
Number of pixels that determine the uv-cell size for super-uniform weighting (0 defaults to -/+ 3 pixels).
npixels -- uv-box used for the weight calculation: a box going from -npixel/2 to +npixel/2 on each side around a point is used to calculate the weight density.
npixels=2 goes from -1 to +1 and covers 3 pixels on a side.
npixels=0 implies a single pixel, which does not make sense for superuniform weighting. Therefore, if npixels=0 it will be forced to 6 (or a box of -3 pixels to +3 pixels) to cover 7 pixels on a side.

uvtaper
uv-taper on outer baselines in the uv-plane.
Apply a Gaussian taper in addition to the weighting scheme specified via the 'weighting' parameter. Higher spatial frequencies are weighted down relative to lower spatial frequencies to suppress artifacts arising from poorly sampled areas of the uv-plane. It is equivalent to smoothing the PSF obtained by other weighting schemes, and can be specified either as a Gaussian in uv-space (e.g. units of lambda) or as a Gaussian in the image domain (e.g. angular units like arcsec).
uvtaper = [bmaj, bmin, bpa]
NOTE: the on-sky FWHM in arcsec is roughly the uv taper/200 (klambda).
default: uvtaper=[]; no Gaussian taper applied
example: uvtaper=['5klambda'] : circular taper of FWHM = 5 kilo-lambda
         uvtaper=['5klambda','3klambda','45.0deg']
         uvtaper=['10arcsec'] : on-sky FWHM of 10 arcseconds
         uvtaper=['300.0'] : default units are lambda in the aperture plane

niter
Maximum number of iterations.
A stopping criterion based on the total iteration count. Currently the parameter type is defined as an integer, so values larger than 2147483647 will not be set properly, as they cause an overflow.
Iterations are typically defined as selecting one flux component and partially subtracting it out from the residual image.
niter=0 : Do only the initial major cycle (make the dirty image, psf, pb, etc.)
niter larger than zero : Run major and minor cycles.

Note : Global stopping criteria vs major-cycle triggers
In addition to the global stopping criteria, the following rules are used to determine when to terminate a set of minor cycle iterations and trigger major cycles [derived from Cotton-Schwab Clean, 1984]:
'cycleniter' : controls the maximum number of iterations per image plane before triggering a major cycle.
'cyclethreshold' : an automatically computed threshold related to the max sidelobe level of the PSF and the peak residual.
Divergence, detected as an increase of 10% in the peak residual from the minimum so far (during minor cycle iterations).
The first criterion to be satisfied takes precedence.

Note : Iteration counts for cubes or multi-field images :
For images with multiple planes (or image fields) on which the deconvolver operates in sequence, iterations are counted across all planes (or image fields). The iteration count is compared with 'niter' only after all channels/planes/fields have completed their minor cycles and exited either due to 'cycleniter' or 'cyclethreshold'. Therefore, the actual number of iterations reported in the logger can sometimes be larger than the user specified value in 'niter'. For example, with niter=100, cycleniter=20, nchan=10, threshold=0, a total of 200 iterations will be done in the first set of minor cycles before the total is compared with niter=100 and it exits.

Note : Additional global stopping criteria include
- no change in the peak residual across two major cycles
- a 50% or more increase in the peak residual across one major cycle
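Pulling the iteration controls above together, here is a minimal sketch of a cube run that exhibits the niter/cycleniter accounting described in the note (names and values are placeholders):

    # with nchan=10 and cycleniter=20, up to 200 iterations can accumulate in
    # the first set of minor cycles before the total is compared against niter=100
    tclean(vis='my_data.ms', imagename='iter_demo',
           specmode='cube', nchan=10,
           niter=100, cycleniter=20, threshold='0.0mJy')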
gain
Loop gain.
Fraction of the source flux to subtract out of the residual image for the CLEAN algorithm and its variants. A low value (0.2 or less) is recommended when the sky brightness distribution is not well represented by the basis functions used by the chosen deconvolution algorithm. A higher value can be tried when there is a good match between the true sky brightness structure and the basis function shapes. For example, for extended emission, multiscale clean with an appropriate set of scale sizes will tolerate a higher loop gain than Clark clean.

threshold
Stopping threshold (number in units of Jy, or string).
A global stopping threshold that the peak residual (within the clean mask) across all image planes is compared to.
threshold = 0.005 : 5 mJy
threshold = '5.0mJy'
Note : A 'cyclethreshold' is internally computed and used as a major cycle trigger. It is related to what fraction of the PSF can be reliably used during minor cycle updates of the residual image. By default the minor cycle iterations terminate once the peak residual reaches the first sidelobe level of the brightest source.
'cyclethreshold' is computed as follows, using the settings of the parameters 'cyclefactor', 'minpsffraction', 'maxpsffraction' and 'threshold' (a worked sketch of this calculation appears below, after the 'cyclefactor' description):
psf_fraction = max_psf_sidelobe_level * 'cyclefactor'
psf_fraction = max(psf_fraction, 'minpsffraction')
psf_fraction = min(psf_fraction, 'maxpsffraction')
cyclethreshold = peak_residual * psf_fraction
cyclethreshold = max( cyclethreshold, 'threshold' )
If nsigma is set (>0.0), the N-sigma threshold is calculated (see the description under nsigma), and cyclethreshold is further modified as:
cyclethreshold = max( cyclethreshold, nsigma_threshold )
'cyclethreshold' is made visible and editable only in the interactive GUI when tclean is run with interactive=True.

nsigma
Multiplicative factor for the rms-based threshold stopping criterion.
The N-sigma threshold is calculated as nsigma * rms value per image plane, determined from robust statistics. For nsigma > 0.0, in a minor cycle, the maximum of the two values, the N-sigma threshold and cyclethreshold, is used to trigger a major cycle (see also the description under 'threshold'). Set nsigma=0.0 to preserve the previous tclean behavior without this feature. The top level parameter fastnoise is relevant for the rms noise calculation which is used to determine the threshold. The parameter 'nsigma' may be an int, float, or double.

cycleniter
Maximum number of minor-cycle iterations (per plane) before triggering a major cycle.
For example, for a single plane image, if niter=100 and cycleniter=20, there will be 5 major cycles after the initial one (assuming there is no threshold based stopping criterion). At each major cycle boundary, if the number of iterations left over (to reach niter) is less than cycleniter, it is set to the difference.
Note : cycleniter applies per image plane, even if cycleniter x nplanes gives a total number of iterations greater than 'niter'. This is to preserve consistency across image planes within one set of minor cycle iterations.

cyclefactor
Scaling on the PSF sidelobe level to compute the minor-cycle stopping threshold.
Please refer to the Note under the documentation for 'threshold' that discusses the calculation of 'cyclethreshold'.
cyclefactor=1.0 results in a cyclethreshold at the first sidelobe level of the brightest source in the residual image before the minor cycle starts.
cyclefactor=0.5 allows the minor cycle to go deeper.
cyclefactor=2.0 triggers a major cycle sooner.
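The cyclethreshold rules quoted under 'threshold' can be written out directly. This is an illustrative sketch of the documented formula, not tclean's internal code, and the default argument values below are chosen for the example rather than taken from this document:

    def cycle_threshold(peak_residual, max_psf_sidelobe_level, cyclefactor=1.0,
                        minpsffraction=0.05, maxpsffraction=0.8,
                        threshold=0.0, nsigma_threshold=0.0):
        """Minor-cycle stopping threshold per the rules documented above."""
        psf_fraction = max_psf_sidelobe_level * cyclefactor
        psf_fraction = max(psf_fraction, minpsffraction)
        psf_fraction = min(psf_fraction, maxpsffraction)
        ct = peak_residual * psf_fraction
        ct = max(ct, threshold)
        return max(ct, nsigma_threshold)   # nsigma term applies only when nsigma > 0

    # e.g. a 10 Jy peak residual with a 10% first sidelobe and cyclefactor=1.0
    print(cycle_threshold(10.0, 0.1))      # -> 1.0 (Jy)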
minpsffraction
PSF fraction that marks the maximum depth of cleaning in the minor cycle.
Please refer to the Note under the documentation for 'threshold' that discusses the calculation of 'cyclethreshold'.
For example, minpsffraction=0.5 will stop cleaning at half the height of the peak residual and trigger a major cycle earlier.

maxpsffraction
PSF fraction that marks the minimum depth of cleaning in the minor cycle.
Please refer to the Note under the documentation for 'threshold' that discusses the calculation of 'cyclethreshold'.
For example, maxpsffraction=0.8 will ensure that at least the top 20 percent of the source will be subtracted out in the minor cycle, even if the first PSF sidelobe is at the 0.9 level (an extreme example), or if the cyclefactor is set too high for anything to get cleaned.

interactive
Modify masks and parameters at runtime.
interactive=True will trigger an interactive GUI at every major cycle boundary (after the major cycle and before the minor cycle). The interactive mode is currently not available for parallel cube imaging (please also refer to the Note under the documentation for 'parallel' below).
Options for runtime parameter modification are :
Interactive clean mask : Draw a 1/0 mask (appears as a contour) by hand. If a mask is supplied at the task interface or if automasking is invoked, the current mask is displayed in the GUI and is available for manual editing.
Note : If a mask contour is not visible, please check the cursor display at the bottom of the GUI to see which parts of the mask image have ones and zeros. If the entire mask=1, no contours will be visible.
Operation buttons :
-- Stop execution now (restore the current model and exit)
-- Continue on until global stopping criteria are reached, without stopping for any more interaction
-- Continue with minor cycles and return for interaction after the next major cycle
Iteration control :
-- max cycleniter : Trigger for the next major cycle. The display begins with [ min( cycleniter, niter - itercount ) ] and can be edited by hand.
-- iterations left : The display begins with [ niter - itercount ] and can be edited to increase or decrease the total allowed niter.
-- threshold : Edit the global stopping threshold.
-- cyclethreshold : The display begins with the automatically computed value (see the Note in the help for 'threshold'), and can be edited by hand.
All edits will be reflected in the log messages that appear once the minor cycles begin.
[ For scripting purposes, replacing True/False with 1/0 will get tclean to return an imaging summary dictionary to python. ]

usemask
Type of mask(s) to be used for deconvolution.
user : (default) mask image(s), user specified region file(s), or string CRTF expression(s). Subparameters: mask, pbmask
pb : primary beam mask. Subparameter: pbmask
Example : usemask="pb", pbmask=0.2 : Construct a mask at the 0.2 pb gain level. (Currently, this option will work only with gridders that produce .pb (i.e. mosaic and awproject), or if an externally produced .pb image exists on disk.)
auto-multithresh : auto-masking by multiple thresholds for deconvolution. Subparameters : sidelobethreshold, noisethreshold, lownoisethreshold, negativethreshold, smoothfactor, minbeamfrac, cutthreshold, pbmask, growiterations, dogrowprune, minpercentchange, verbose
Additional top level parameter relevant to auto-multithresh : fastnoise
If pbmask is >0.0, the region outside the specified pb gain level is excluded from image statistics when determining the threshold.
Note : By default the intermediate mask generated by automask at each deconvolution cycle is overwritten in the next cycle, but one can save them by setting the environment variable SAVE_ALL_AUTOMASKS="true" (e.g. in the CASA prompt, os.environ['SAVE_ALL_AUTOMASKS']="true"). The saved CASA mask image name will be imagename.mask.autothresh#, where # is the iteration cycle number.

mask
Mask (a list of image name(s), region file(s), or region string(s)).
The name of a CASA image, region file, or region string that specifies a 1/0 mask to be used for deconvolution. Only locations with value 1 will be considered for the centers of flux components in the minor cycle. If the specified regions fall completely outside of the image, tclean will throw an error.
Manual mask options/examples :
mask='xxx.mask' : Use this CASA image named xxx.mask, containing ones and zeros, as the mask. If the mask differs only in spatial coordinates from what is being made, it will be resampled to the target coordinate system before being used. The mask has to have the same shape in velocity and Stokes planes as the output image. Exceptions are single velocity and/or single Stokes plane masks: they will be expanded to cover all velocity and/or Stokes planes of the output cube.
[ Note : If an error occurs during image resampling or if the expected mask does not appear, please try using the tasks 'imregrid' or 'makemask' to resample the mask image onto a CASA image with the target shape and coordinates, and supply it via the 'mask' parameter. ]
mask='xxx.crtf' : A text file with region strings and the following on the first line: ( #CRTFv0 CASA Region Text Format version 0 ). This is the format of a file created via the viewer's region tool when saved in CASA region file format.
mask='circle[[40pix,40pix],10pix]' : A CASA region string.
mask=['xxx.mask','xxx.crtf', 'circle[[40pix,40pix],10pix]'] : a list of masks.
Note : Mask images for deconvolution must contain 1 or 0 in each pixel. Such a mask is different from an internal T/F mask that can be held within each CASA image. These two types of masks are not automatically interchangeable, so please use the makemask task to copy between them if you need to construct a 1/0 based mask from a T/F one.
Note : Work is in progress to generate more flexible masking options and enable more controls.

pbmask
Sub-parameter for usemask='auto-multithresh': primary beam mask.
Examples : pbmask=0.0 (default, no pb mask); pbmask=0.2 (construct a mask at the 0.2 pb gain level)

sidelobethreshold
Sub-parameter for "auto-multithresh": mask threshold based on sidelobe levels: sidelobethreshold * max_sidelobe_level * peak residual.

noisethreshold
Sub-parameter for "auto-multithresh": mask threshold based on the noise level: noisethreshold * rms + location (=median). The rms is calculated from the MAD, with rms = 1.4826*MAD. (A small numerical sketch of these MAD-based thresholds appears after the 'fastnoise' description below.)

lownoisethreshold
Sub-parameter for "auto-multithresh": mask threshold to grow previously masked regions via binary dilation: lownoisethreshold * rms in the residual image + location (=median). The rms is calculated from the MAD, with rms = 1.4826*MAD.

negativethreshold
Sub-parameter for "auto-multithresh": mask threshold for negative features: -1.0 * negativethreshold * rms + location (=median). The rms is calculated from the MAD, with rms = 1.4826*MAD.
smoothfactor
Sub-parameter for "auto-multithresh": smoothing factor in units of the beam.

minbeamfrac
Sub-parameter for "auto-multithresh": minimum beam fraction in size, to prune masks smaller than minbeamfrac * beam.
<=0.0 : No pruning

cutthreshold
Sub-parameter for "auto-multithresh": threshold to cut the smoothed mask to create the final mask: cutthreshold * peak of the smoothed mask.

growiterations
Sub-parameter for "auto-multithresh": maximum number of iterations to perform using binary dilation for growing the mask.

dogrowprune
Experimental sub-parameter for "auto-multithresh": do pruning on the grow mask.

minpercentchange
If the change in the mask size in a particular channel is less than minpercentchange, stop masking that channel in subsequent cycles. This check is applied only when the noise based threshold is used and when the previous clean major cycle had a cyclethreshold value equal to the clean threshold. Values equal to -1.0 (or any value less than 0.0) will turn off this check (the default).
Automask will still stop masking if the current channel mask is an empty mask and the noise threshold was used to determine the mask.

verbose
If verbose=True, the summary of automasking at the end of each automasking process is printed in the logger. The following information per channel will be listed in the summary:
chan : channel number
masking? : F - stop updating automask for the subsequent iteration cycles
RMS : robust rms noise
peak : peak in the residual image
thresh_type : type of threshold used (noise or sidelobe)
thresh_value : the value of the threshold used
N_reg : number of automask regions
N_pruned : number of automask regions removed by pruning
N_grow : number of grow mask regions
N_grow_pruned : number of grow mask regions removed by pruning
N_neg_pix : number of pixels in negative mask regions
Note that for a large cube, the extra logging may slow down the process.

fastnoise
Only relevant when an automask (usemask='auto-multithresh') and/or an n-sigma stopping threshold (nsigma>0.0) are/is used. If it is set to True, a simpler but faster noise calculation is used. In this case, the threshold values are determined based on classic statistics (using all unmasked pixels for the calculations). If it is set to False, the new noise calculation method is used, based on the pre-existing mask.
Case 1 : no existing mask. Calculate the image statistics using the Chauvenet algorithm.
Case 2 : there is an existing mask. Calculate the image statistics by the classical method on the region outside the mask and inside the primary beam mask.
In all cases above, the RMS noise is calculated from the MAD.
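The MAD-based rms and the mask thresholds quoted above can be reproduced numerically. This is a minimal numpy sketch, independent of tclean; the function name is ours, and combining the two criteria with max() summarizes the auto-multithresh scheme as described here, so treat it as illustrative rather than the exact internal logic:

    import numpy as np

    def automask_threshold(residual_pixels, sidelobethreshold,
                           max_sidelobe_level, noisethreshold):
        """Evaluate the main auto-multithresh mask threshold from the formulas above."""
        location = np.median(residual_pixels)
        mad = np.median(np.abs(residual_pixels - location))
        rms = 1.4826 * mad                                  # robust rms from the MAD
        peak_residual = np.max(residual_pixels)
        sidelobe_thresh = sidelobethreshold * max_sidelobe_level * peak_residual
        noise_thresh = noisethreshold * rms + location
        return max(sidelobe_thresh, noise_thresh)           # the larger criterion wins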
restart
Re-use existing images (and start from an existing model image), or automatically increment the image name and make a new image set.
True : Re-use existing images. If imagename.model exists, the subsequent run will start from this model (i.e. predicting it using the current gridder settings and starting from the residual image). Care must be taken when combining this option with startmodel: currently, only one or the other can be used.
startmodel='', imagename.model exists : start from imagename.model.
startmodel='xxx', imagename.model does not exist : start from startmodel.
startmodel='xxx', imagename.model exists : exit with an error message requesting the user to pick only one model. This situation can arise when doing one run with startmodel='xxx' to produce an output imagename.model that includes the content of startmodel, and wanting to restart a second run to continue deconvolution. startmodel should be set to '' before continuing.
If any change in the shape or coordinate system of the image is desired during the restart, please change the image name and use the startmodel (and mask) parameter(s) so that the old model (and mask) can be regridded to the new coordinate system before starting.
False : A convenience feature to increment imagename with '_1', '_2', etc. as suffixes so that all runs of tclean are fresh starts (without having to change the imagename parameter or delete images). This mode will search the current directory for all existing imagename extensions, pick the maximum, and add 1. For imagename='try' it will make try.psf, try_2.psf, try_3.psf, etc. This also works if you specify a directory name in the path: imagename='outdir/try'. If './outdir' does not exist, it will create it; then it will search for existing filenames inside that directory.
If outlier fields are specified, the incrementing happens for each of them (since each has its own 'imagename'). The counters are synchronized across image fields to make it easier to match up sets of output images: 1 is added to the 'max id' from all outlier names on disk. So, if you do two runs with only the main field (imagename='try'), and in the third run you add an outlier with imagename='outtry', you will get the following image names for the third run : 'try_3' and 'outtry_3', even though 'outtry' and 'outtry_2' have not been used.

savemodel
Options to save model visibilities (none, virtual, modelcolumn).
Often, model visibilities must be created and saved in the MS to be later used for self-calibration (or to just plot and view them).
none : Do not save any model visibilities in the MS. The MS is opened in readonly mode. Model visibilities can be predicted in a separate step by restarting tclean with niter=0 and savemodel='virtual' or 'modelcolumn', and not changing any image names, so that it finds the .model on disk (or by changing imagename and setting startmodel to the original imagename).
virtual : In the last major cycle, save the image model and the state of the gridder used during imaging within the SOURCE subtable of the MS. Images required for de-gridding will also be stored internally. All future references to model visibilities will activate the (de)gridder to compute them on-the-fly. This mode is useful when the dataset is large enough that an additional model data column on disk may be too much extra disk I/O, or when the gridder is simple enough that on-the-fly recomputation of the model visibilities is quicker than disk I/O. For example, gridder='awproject' does not support the virtual model.
modelcolumn : In the last major cycle, save the predicted model visibilities in the MODEL_DATA column of the MS. This mode is useful when the de-gridding cost to produce the model visibilities is higher than the I/O required to read the model visibilities from disk. This mode is currently required for gridder='awproject'. This mode is also required for the ability to later pull out model visibilities from the MS into a python array for custom processing.
Note 1 : The imagename.model image on disk will always be constructed if the minor cycle runs. This savemodel parameter applies only to model visibilities created by de-gridding the model image.
Note 2 : It is possible for an MS to have both a virtual model and a model_data column, but under normal operation, the last used mode will get triggered. Use the delmod task to clear out existing models from an MS if confusion arises.
Note 3 : When parallel=True, use savemodel='none'; the other options are not yet ready for use in parallel. If model visibilities need to be saved (virtual or modelcolumn), please run tclean in serial mode with niter=0 after the parallel run.
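The prediction-only restart described under 'none' above looks like this in practice; a minimal sketch with placeholder names:

    # imaging run : MS opened read-only; the model exists only as img.model on disk
    tclean(vis='my_data.ms', imagename='img', niter=1000, savemodel='none')

    # later : predict model visibilities from the existing img.model without
    # changing any image names, so that tclean finds the .model on disk
    tclean(vis='my_data.ms', imagename='img', niter=0,
           savemodel='modelcolumn',      # or 'virtual'
           calcres=False, calcpsf=False)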
calcres
Calculate the initial residual image.
This parameter controls what the first major cycle does.
calcres=False with niter greater than 0 will assume that a .residual image already exists and that the minor cycle can begin without recomputing it.
calcres=False with niter=0 implies that only the PSF will be made and no data will be gridded.
calcres=True requires that calcpsf=True, or that the .psf and .sumwt images already exist on disk (for normalization purposes).
Usage example : For large runs (or pipeline scripts) it may be useful to first run tclean with niter=0 to create an initial .residual to look at, and perhaps make a custom mask for. Imaging can then be resumed without recomputing it.

calcpsf
Calculate the PSF.
This parameter controls what the first major cycle does.
calcpsf=False will assume that a .psf image already exists and that the minor cycle can begin without recomputing it.

psfcutoff
When the .psf image is created, a 2 dimensional Gaussian is fit to the main lobe of the PSF. Which pixels in the PSF are fitted is determined by psfcutoff. The default value of psfcutoff is 0.35, and it can be varied from 0.01 to 0.99.
Fitting algorithm:
- A region of 41 x 41 pixels around the peak of the PSF is compared against the psfcutoff. Sidelobes are ignored by radially searching from the PSF peak.
- Calculate the bottom left corner (blc) and top right corner (trc) from the points. Expand blc and trc by a number of pixels (5).
- Create a new sub-matrix from blc and trc.
- Interpolate the matrix to a target number of points (3001) using a CUBIC spline.
- All the non-sidelobe points in the interpolated matrix that are above the psfcutoff are used to fit a Gaussian. A Levenberg-Marquardt algorithm is used.
- If the fitting fails, the algorithm is repeated with the psfcutoff decreased (psfcutoff=psfcutoff/1.5). A message will appear in the log if the fitting fails, along with the new value of psfcutoff. This will be done up to 50 times if fitting fails.
This Gaussian beam is defined by a major axis, minor axis, and position angle. During the restoration process, this Gaussian beam is used as the Clean beam. Varying psfcutoff might be useful for producing a better fit for highly non-Gaussian PSFs; however, the resulting fits should be carefully checked. This parameter should rarely be changed. (This is not the support size for clark clean.)

parallel
Run major cycles in parallel (this feature is experimental).
Parallel tclean will run only if casa has already been started using mpirun. Please refer to the HPC documentation for details on how to start this on your system.
Example : mpirun -n 3 -xterm 0 `which casa` Continuum Imaging : - Data are partitioned (in time) into NProc pieces - Gridding/iFT is done separately per partition - Images (and weights) are gathered and then normalized - One non-parallel minor cycle is run - Model image is scattered to all processes - Major cycle is done in parallel per partition Cube Imaging : - Data and Image coordinates are partitioned (in freq) into NProc pieces - Each partition is processed independently (major and minor cycles) - All processes are synchronized at major cycle boundaries for convergence checks - At the end, cubes from all partitions are concatenated along the spectral axis Note 1 : Iteration control for cube imaging is independent per partition. - There is currently no communication between them to synchronize information such as peak residual and cyclethreshold. Therefore, different chunks may trigger major cycles at different levels. - For cube imaging in parallel, there is currently no interactive masking. (Proper synchronization of iteration control is work in progress.) RETURNS void --------- examples ----------------------------------------------------------- This is the first release of our refactored imager code. Although most features have been used and validated, there are many details that have not been thoroughly tested. Feedback will be much appreciated. Usage Examples : ----------------------- (A) A suite of test programs that demo all usable modes of tclean on small test datasets https://svn.cv.nrao.edu/svn/casa/branches/release-4_5/gcwrap/python/scripts/tests/test_refimager.py (B) A set of demo examples for ALMA imaging https://casaguides.nrao.edu/index.php/TCLEAN_and_ALMA """ _info_group_ = """imaging""" _info_desc_ = """Parallelized tclean in consecutive time steps""" __schema = {'vis': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'imageprefix': {'type': 'cStr', 'coerce': _coerce.to_str}, 'imagesuffix': {'type': 'cStr', 'coerce': _coerce.to_str}, 'ncpu': {'type': 'cInt'}, 'twidth': {'type': 'cInt'}, 'doreg': {'type': 'cBool'}, 'usephacenter': {'type': 'cBool'}, 'reftime': {'type': 'cStr', 'coerce': _coerce.to_str}, 'toTb': {'type': 'cBool'}, 'sclfactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'subregion': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'docompress': {'type': 'cBool'}, 'overwrite': {'type': 'cBool'}, 'selectdata': {'type': 'cBool'}, 'field': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'spw': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'timerange': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'uvrange': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'antenna': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'scan': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'observation': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cInt'}]}, 'intent': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 
'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'datacolumn': {'type': 'cStr', 'coerce': _coerce.to_str}, 'imagename': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'imsize': {'anyof': [{'type': 'cInt'}, {'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}]}, 'cell': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cFloat', 'coerce': _coerce.to_float}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}, {'type': 'cInt'}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'phasecenter': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}]}, 'stokes': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'I', 'IQUV', 'UV', 'RRLL', 'IQ', 'V', 'pseudoI', 'QU', 'YY', 'RR', 'Q', 'U', 'IV', 'XX', 'XXYY', 'LL' ]}, 'projection': {'type': 'cStr', 'coerce': _coerce.to_str}, 'startmodel': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'specmode': {'type': 'cVariant', 'coerce': [_coerce.to_variant] # <allowed> IS NOT ALLOWED FOR A PARAMETER OF TYPE any }, 'reffreq': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'nchan': {'type': 'cInt'}, 'start': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'width': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'outframe': {'type': 'cStr', 'coerce': _coerce.to_str}, 'veltype': {'type': 'cStr', 'coerce': _coerce.to_str}, 'restfreq': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'interpolation': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'nearest', 'linear', 'cubic' ]}, 'perchanweightdensity': {'type': 'cBool'}, 'gridder': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'widefield', 'wproject', 'imagemosaic', 'standard', 'awproject', 'wprojectft', 'mosaicft', 'ft', 'ftmosaic', 'mosaic', 'awprojectft', 'gridft' ]}, 'facets': {'type': 'cInt'}, 'psfphasecenter': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}]}, 'wprojplanes': {'type': 'cInt'}, 'vptable': {'type': 'cStr', 'coerce': _coerce.to_str}, 'mosweight': {'type': 'cBool'}, 'aterm': {'type': 'cBool'}, 'psterm': {'type': 'cBool'}, 'wbawp': {'type': 'cBool'}, 'conjbeams': {'type': 'cBool'}, 'cfcache': {'type': 'cStr', 'coerce': _coerce.to_str}, 'usepointing': {'type': 'cBool'}, 'computepastep': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'rotatepastep': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'pointingoffsetsigdev': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'pblimit': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'normtype': {'type': 'cStr', 'coerce': _coerce.to_str}, 'deconvolver': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'clarkstokes_exp', 'mtmfs', 'mem', 'clarkstokes', 'hogbom', 'clark_exp', 'clark', 'multiscale' ]}, 'scales': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'nterms': {'type': 'cInt'}, 'smallscalebias': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'restoration': {'type': 'cBool'}, 'restoringbeam': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'pbcor': {'type': 'cBool'}, 'outlierfile': {'type': 'cStr', 'coerce': _coerce.to_str}, 'weighting': 
{'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'briggsabs', 'briggs', 'briggsbwtaper', 'natural', 'radial', 'superuniform', 'uniform' ]}, 'robust': {'type': 'cFloat', 'coerce': _coerce.to_float, 'min': -2.0, 'max': 2.0}, 'noise': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'npixels': {'type': 'cInt'}, 'uvtaper': {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}, 'niter': {'type': 'cInt'}, 'gain': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'threshold': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'nsigma': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'cycleniter': {'type': 'cInt'}, 'cyclefactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'minpsffraction': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'maxpsffraction': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'interactive': {'anyof': [{'type': 'cBool'}, {'type': 'cInt'}]}, 'usemask': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'user', 'pb', 'auto-multithresh' ]}, 'mask': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'pbmask': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'sidelobethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'noisethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'lownoisethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'negativethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'smoothfactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'minbeamfrac': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'cutthreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'growiterations': {'type': 'cInt'}, 'dogrowprune': {'type': 'cBool'}, 'minpercentchange': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'verbose': {'type': 'cBool'}, 'fastnoise': {'type': 'cBool'}, 'restart': {'type': 'cBool'}, 'savemodel': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'none', 'virtual', 'modelcolumn' ]}, 'calcres': {'type': 'cBool'}, 'calcpsf': {'type': 'cBool'}, 'psfcutoff': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'parallel': {'type': 'cBool'}} def __init__(self): self.__stdout = None self.__stderr = None self.__root_frame_ = None def __globals_(self): if self.__root_frame_ is None: self.__root_frame_ = _find_frame( ) assert self.__root_frame_ is not None, "could not find CASAshell global frame" return self.__root_frame_ def __to_string_(self,value): if type(value) is str: return "'%s'" % value else: return str(value) def __validate_(self,doc,schema): return _pc.validate(doc,schema) def __do_inp_output(self,param_prefix,description_str,formatting_chars): out = self.__stdout or sys.stdout description = description_str.split( ) prefix_width = 23 + 23 + 4 output = [ ] addon = '' first_addon = True while len(description) > 0: ## starting a new line..................................................................... if len(output) == 0: ## for first line add parameter information............................................ if len(param_prefix)-formatting_chars > prefix_width - 1: output.append(param_prefix) continue addon = param_prefix + ' #' first_addon = True addon_formatting = formatting_chars else: ## for subsequent lines space over prefix width........................................ addon = (' ' * prefix_width) + '#' first_addon = False addon_formatting = 0 ## if first word of description puts us over the screen width, bail........................ 
if len(addon + description[0]) - addon_formatting + 1 > self.term_width: ## if we're doing the first line make sure it's output................................. if first_addon: output.append(addon) break while len(description) > 0: ## if the next description word puts us over break for the next line................... if len(addon + description[0]) - addon_formatting + 1 > self.term_width: break addon = addon + ' ' + description[0] description.pop(0) output.append(addon) out.write('\n'.join(output) + '\n') #--------- return nonsubparam values ---------------------------------------------- def __phasecenter_dflt( self, glb ): return '' def __phasecenter( self, glb ): if 'phasecenter' in glb: return glb['phasecenter'] return '' def __projection_dflt( self, glb ): return 'SIN' def __projection( self, glb ): if 'projection' in glb: return glb['projection'] return 'SIN' def __vis_dflt( self, glb ): return '' def __vis( self, glb ): if 'vis' in glb: return glb['vis'] return '' def __imagesuffix_dflt( self, glb ): return '' def __imagesuffix( self, glb ): if 'imagesuffix' in glb: return glb['imagesuffix'] return '' def __parallel_dflt( self, glb ): return False def __parallel( self, glb ): if 'parallel' in glb: return glb['parallel'] return False def __twidth_dflt( self, glb ): return int(1) def __twidth( self, glb ): if 'twidth' in glb: return glb['twidth'] return int(1) def __datacolumn_dflt( self, glb ): return 'corrected' def __datacolumn( self, glb ): if 'datacolumn' in glb: return glb['datacolumn'] return 'corrected' def __restart_dflt( self, glb ): return True def __restart( self, glb ): if 'restart' in glb: return glb['restart'] return True def __cell_dflt( self, glb ): return [ ] def __cell( self, glb ): if 'cell' in glb: return glb['cell'] return [ ] def __startmodel_dflt( self, glb ): return '' def __startmodel( self, glb ): if 'startmodel' in glb: return glb['startmodel'] return '' def __deconvolver_dflt( self, glb ): return 'hogbom' def __deconvolver( self, glb ): if 'deconvolver' in glb: return glb['deconvolver'] return 'hogbom' def __imsize_dflt( self, glb ): return [ int(100) ] def __imsize( self, glb ): if 'imsize' in glb: return glb['imsize'] return [ int(100) ] def __calcpsf_dflt( self, glb ): return True def __calcpsf( self, glb ): if 'calcpsf' in glb: return glb['calcpsf'] return True def __niter_dflt( self, glb ): return int(0) def __niter( self, glb ): if 'niter' in glb: return glb['niter'] return int(0) def __selectdata_dflt( self, glb ): return True def __selectdata( self, glb ): if 'selectdata' in glb: return glb['selectdata'] return True def __imageprefix_dflt( self, glb ): return '' def __imageprefix( self, glb ): if 'imageprefix' in glb: return glb['imageprefix'] return '' def __outlierfile_dflt( self, glb ): return '' def __outlierfile( self, glb ): if 'outlierfile' in glb: return glb['outlierfile'] return '' def __calcres_dflt( self, glb ): return True def __calcres( self, glb ): if 'calcres' in glb: return glb['calcres'] return True def __ncpu_dflt( self, glb ): return int(8) def __ncpu( self, glb ): if 'ncpu' in glb: return glb['ncpu'] return int(8) def __savemodel_dflt( self, glb ): return 'none' def __savemodel( self, glb ): if 'savemodel' in glb: return glb['savemodel'] return 'none' def __usemask_dflt( self, glb ): return 'user' def __usemask( self, glb ): if 'usemask' in glb: return glb['usemask'] return 'user' def __specmode_dflt( self, glb ): return 'mfs' def __specmode( self, glb ): if 'specmode' in glb: return glb['specmode'] return 'mfs' def 
__restoration_dflt( self, glb ): return True def __restoration( self, glb ): if 'restoration' in glb: return glb['restoration'] return True def __stokes_dflt( self, glb ): return 'I' def __stokes( self, glb ): if 'stokes' in glb: return glb['stokes'] return 'I' def __fastnoise_dflt( self, glb ): return True def __fastnoise( self, glb ): if 'fastnoise' in glb: return glb['fastnoise'] return True def __imagename_dflt( self, glb ): return '' def __imagename( self, glb ): if 'imagename' in glb: return glb['imagename'] return '' def __weighting_dflt( self, glb ): return 'natural' def __weighting( self, glb ): if 'weighting' in glb: return glb['weighting'] return 'natural' def __gridder_dflt( self, glb ): return 'standard' def __gridder( self, glb ): if 'gridder' in glb: return glb['gridder'] return 'standard' def __overwrite_dflt( self, glb ): return False def __overwrite( self, glb ): if 'overwrite' in glb: return glb['overwrite'] return False def __doreg_dflt( self, glb ): return False def __doreg( self, glb ): if 'doreg' in glb: return glb['doreg'] return False #--------- return inp/go default -------------------------------------------------- def __antenna_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __smoothfactor_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(1.0) return None def __negativethreshold_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(0.0) return None def __minbeamfrac_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(0.3) return None def __psfphasecenter_dflt( self, glb ): if self.__gridder( glb ) == "mosaic": return "" if self.__gridder( glb ) == "mosaicft": return "" return None def __mask_dflt( self, glb ): if self.__usemask( glb ) == "user": return "" return None def __sclfactor_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return float(1.0) return None def __field_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __cutthreshold_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(0.01) return None def __pblimit_dflt( self, glb ): if self.__gridder( glb ) == "standard": return float(0.2) if self.__gridder( glb ) == "widefield": return float(0.2) if self.__gridder( glb ) == "wproject": return float(0.2) if self.__gridder( glb ) == "wprojectft": return float(0.2) if self.__gridder( glb ) == "mosaic": return float(0.2) if self.__gridder( glb ) == "mosaicft": return float(0.2) if self.__gridder( glb ) == "ftmosaic": return float(0.2) if self.__gridder( glb ) == "imagemosaic": return float(0.2) if self.__gridder( glb ) == "awproject": return float(0.2) if self.__gridder( glb ) == "awprojectft": return float(0.2) return None def __smallscalebias_dflt( self, glb ): if self.__deconvolver( glb ) == "multiscale": return float(0.0) if self.__deconvolver( glb ) == "mtmfs": return float(0.0) return None def __maxpsffraction_dflt( self, glb ): if self.__niter( glb ) != int(0): return float(0.8) return None def __verbose_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return bool(False) return None def __intent_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __noise_dflt( self, glb ): if self.__weighting( glb ) == "briggsabs": return "1.0Jy" return None def __interpolation_dflt( self, glb ): if self.__specmode( glb ) == "cube": return "linear" if self.__specmode( glb ) == "cubesource": return "linear" if 
self.__specmode( glb ) == "cubedata": return "linear" return None def __subregion_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return "" return None def __nterms_dflt( self, glb ): if self.__deconvolver( glb ) == "mtmfs": return int(2) return None def __pointingoffsetsigdev_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return [] if self.__gridder( glb ) == "awprojectft": return [] return None def __nchan_dflt( self, glb ): if self.__specmode( glb ) == "cube": return int(-1) if self.__specmode( glb ) == "cubesource": return int(-1) if self.__specmode( glb ) == "cubedata": return int(-1) return None def __reffreq_dflt( self, glb ): if self.__specmode( glb ) == "mfs": return "" return None def __conjbeams_dflt( self, glb ): if self.__gridder( glb ) == "mosaic": return bool(False) if self.__gridder( glb ) == "mosaicft": return bool(False) if self.__gridder( glb ) == "awproject": return bool(False) if self.__gridder( glb ) == "awprojectft": return bool(False) return None def __restoringbeam_dflt( self, glb ): if self.__restoration( glb ) == bool(True): return [] return None def __sidelobethreshold_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(3.0) return None def __reftime_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return "" return None def __cycleniter_dflt( self, glb ): if self.__niter( glb ) != int(0): return int(-1) return None def __minpsffraction_dflt( self, glb ): if self.__niter( glb ) != int(0): return float(0.05) return None def __scan_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __computepastep_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return float(360.0) if self.__gridder( glb ) == "awprojectft": return float(360.0) return None def __minpercentchange_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(-1.0) return None def __wbawp_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return bool(True) if self.__gridder( glb ) == "awprojectft": return bool(True) return None def __docompress_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return bool(False) return None def __interactive_dflt( self, glb ): if self.__niter( glb ) != int(0): return False return None def __npixels_dflt( self, glb ): if self.__weighting( glb ) == "briggs": return int(0) if self.__weighting( glb ) == "briggsabs": return int(0) return None def __mosweight_dflt( self, glb ): if self.__gridder( glb ) == "mosaic": return bool(True) if self.__gridder( glb ) == "ftmosaic": return bool(True) if self.__gridder( glb ) == "awproject": return bool(False) if self.__gridder( glb ) == "awprojectft": return bool(False) return None def __pbcor_dflt( self, glb ): if self.__restoration( glb ) == bool(True): return bool(False) return None def __normtype_dflt( self, glb ): if self.__gridder( glb ) == "mosaic": return "flatnoise" if self.__gridder( glb ) == "mosaicft": return "flatnoise" if self.__gridder( glb ) == "ftmosaic": return "flatnoise" if self.__gridder( glb ) == "imagemosaic": return "flatnoise" if self.__gridder( glb ) == "awproject": return "flatnoise" if self.__gridder( glb ) == "awprojectft": return "flatnoise" return None def __uvtaper_dflt( self, glb ): if self.__weighting( glb ) == "natural": return [] if self.__weighting( glb ) == "briggs": return [] if self.__weighting( glb ) == "briggsabs": return [] if self.__weighting( glb ) == "briggsbwtaper": return [] return None def __cyclefactor_dflt( self, glb ): if self.__niter( glb ) != 
int(0): return float(1.0) return None def __toTb_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return bool(False) return None def __restfreq_dflt( self, glb ): if self.__specmode( glb ) == "cube": return [] if self.__specmode( glb ) == "cubesource": return [] if self.__specmode( glb ) == "cubedata": return [] return None def __pbmask_dflt( self, glb ): if self.__usemask( glb ) == "user": return float(0.0) if self.__usemask( glb ) == "pb": return float(0.2) if self.__usemask( glb ) == "auto-multithresh": return float(0.2) return None def __growiterations_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return int(75) return None def __gain_dflt( self, glb ): if self.__niter( glb ) != int(0): return float(0.1) return None def __scales_dflt( self, glb ): if self.__deconvolver( glb ) == "multiscale": return [] if self.__deconvolver( glb ) == "mtmfs": return [] return None def __psfcutoff_dflt( self, glb ): if self.__calcpsf( glb ) == bool(True): return float(0.35) return None def __robust_dflt( self, glb ): if self.__weighting( glb ) == "briggs": return float(0.5) if self.__weighting( glb ) == "briggsabs": return float(0.5) if self.__weighting( glb ) == "briggsbwtaper": return float(0.5) return None def __vptable_dflt( self, glb ): if self.__gridder( glb ) == "standard": return "" if self.__gridder( glb ) == "widefield": return "" if self.__gridder( glb ) == "wproject": return "" if self.__gridder( glb ) == "wprojectft": return "" if self.__gridder( glb ) == "mosaic": return "" if self.__gridder( glb ) == "mosaicft": return "" if self.__gridder( glb ) == "ftmosaic": return "" if self.__gridder( glb ) == "imagemosaic": return "" return None def __perchanweightdensity_dflt( self, glb ): if self.__specmode( glb ) == "cube": return bool(True) if self.__specmode( glb ) == "cubesource": return bool(True) if self.__specmode( glb ) == "cubedata": return bool(False) return None def __aterm_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return bool(True) if self.__gridder( glb ) == "awprojectft": return bool(True) return None def __usephacenter_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return bool(True) return None def __usepointing_dflt( self, glb ): if self.__gridder( glb ) == "mosaic": return bool(False) if self.__gridder( glb ) == "mosaicft": return bool(False) if self.__gridder( glb ) == "ftmosaic": return bool(False) if self.__gridder( glb ) == "awproject": return bool(False) if self.__gridder( glb ) == "awprojectft": return bool(False) return None def __rotatepastep_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return float(360.0) if self.__gridder( glb ) == "awprojectft": return float(360.0) return None def __threshold_dflt( self, glb ): if self.__niter( glb ) != int(0): return 0.0 return None def __veltype_dflt( self, glb ): if self.__specmode( glb ) == "cube": return "radio" if self.__specmode( glb ) == "cubesource": return "radio" if self.__specmode( glb ) == "cubedata": return "radio" return None def __outframe_dflt( self, glb ): if self.__specmode( glb ) == "cube": return "" if self.__specmode( glb ) == "cubesource": return "REST" return None def __dogrowprune_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return bool(True) return None def __uvrange_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __psterm_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return bool(False) if self.__gridder( glb ) == "awprojectft": return bool(False) return None def 
__start_dflt( self, glb ): if self.__specmode( glb ) == "cube": return "" if self.__specmode( glb ) == "cubesource": return "" if self.__specmode( glb ) == "cubedata": return "" return None def __observation_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __lownoisethreshold_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(1.5) return None def __facets_dflt( self, glb ): if self.__gridder( glb ) == "widefield": return int(1) return None def __noisethreshold_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(5.0) return None def __width_dflt( self, glb ): if self.__specmode( glb ) == "cube": return "" if self.__specmode( glb ) == "cubesource": return "" if self.__specmode( glb ) == "cubedata": return "" return None def __spw_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __timerange_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __nsigma_dflt( self, glb ): if self.__niter( glb ) != int(0): return float(0.0) return None def __cfcache_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return "" if self.__gridder( glb ) == "awprojectft": return "" return None def __wprojplanes_dflt( self, glb ): if self.__gridder( glb ) == "widefield": return int(1) if self.__gridder( glb ) == "wproject": return int(1) if self.__gridder( glb ) == "wprojectft": return int(1) if self.__gridder( glb ) == "imagemosaic": return int(1) if self.__gridder( glb ) == "awproject": return int(1) if self.__gridder( glb ) == "awprojectft": return int(1) return None #--------- return subparam values ------------------------------------------------- def __usephacenter( self, glb ): if 'usephacenter' in glb: return glb['usephacenter'] dflt = self.__usephacenter_dflt( glb ) if dflt is not None: return dflt return True def __reftime( self, glb ): if 'reftime' in glb: return glb['reftime'] dflt = self.__reftime_dflt( glb ) if dflt is not None: return dflt return '' def __toTb( self, glb ): if 'toTb' in glb: return glb['toTb'] dflt = self.__toTb_dflt( glb ) if dflt is not None: return dflt return False def __sclfactor( self, glb ): if 'sclfactor' in glb: return glb['sclfactor'] dflt = self.__sclfactor_dflt( glb ) if dflt is not None: return dflt return float(1.0) def __subregion( self, glb ): if 'subregion' in glb: return glb['subregion'] dflt = self.__subregion_dflt( glb ) if dflt is not None: return dflt return '' def __docompress( self, glb ): if 'docompress' in glb: return glb['docompress'] dflt = self.__docompress_dflt( glb ) if dflt is not None: return dflt return False def __field( self, glb ): if 'field' in glb: return glb['field'] dflt = self.__field_dflt( glb ) if dflt is not None: return dflt return '' def __spw( self, glb ): if 'spw' in glb: return glb['spw'] dflt = self.__spw_dflt( glb ) if dflt is not None: return dflt return '' def __timerange( self, glb ): if 'timerange' in glb: return glb['timerange'] dflt = self.__timerange_dflt( glb ) if dflt is not None: return dflt return '' def __uvrange( self, glb ): if 'uvrange' in glb: return glb['uvrange'] dflt = self.__uvrange_dflt( glb ) if dflt is not None: return dflt return '' def __antenna( self, glb ): if 'antenna' in glb: return glb['antenna'] dflt = self.__antenna_dflt( glb ) if dflt is not None: return dflt return '' def __scan( self, glb ): if 'scan' in glb: return glb['scan'] dflt = self.__scan_dflt( glb ) if dflt is not None: return dflt return '' def __observation( self, 
glb ): if 'observation' in glb: return glb['observation'] dflt = self.__observation_dflt( glb ) if dflt is not None: return dflt return '' def __intent( self, glb ): if 'intent' in glb: return glb['intent'] dflt = self.__intent_dflt( glb ) if dflt is not None: return dflt return '' def __reffreq( self, glb ): if 'reffreq' in glb: return glb['reffreq'] dflt = self.__reffreq_dflt( glb ) if dflt is not None: return dflt return '' def __nchan( self, glb ): if 'nchan' in glb: return glb['nchan'] dflt = self.__nchan_dflt( glb ) if dflt is not None: return dflt return int(-1) def __start( self, glb ): if 'start' in glb: return glb['start'] dflt = self.__start_dflt( glb ) if dflt is not None: return dflt return '' def __width( self, glb ): if 'width' in glb: return glb['width'] dflt = self.__width_dflt( glb ) if dflt is not None: return dflt return '' def __outframe( self, glb ): if 'outframe' in glb: return glb['outframe'] dflt = self.__outframe_dflt( glb ) if dflt is not None: return dflt return 'LSRK' def __veltype( self, glb ): if 'veltype' in glb: return glb['veltype'] dflt = self.__veltype_dflt( glb ) if dflt is not None: return dflt return 'radio' def __restfreq( self, glb ): if 'restfreq' in glb: return glb['restfreq'] dflt = self.__restfreq_dflt( glb ) if dflt is not None: return dflt return [ ] def __interpolation( self, glb ): if 'interpolation' in glb: return glb['interpolation'] dflt = self.__interpolation_dflt( glb ) if dflt is not None: return dflt return 'linear' def __perchanweightdensity( self, glb ): if 'perchanweightdensity' in glb: return glb['perchanweightdensity'] dflt = self.__perchanweightdensity_dflt( glb ) if dflt is not None: return dflt return True def __facets( self, glb ): if 'facets' in glb: return glb['facets'] dflt = self.__facets_dflt( glb ) if dflt is not None: return dflt return int(1) def __psfphasecenter( self, glb ): if 'psfphasecenter' in glb: return glb['psfphasecenter'] dflt = self.__psfphasecenter_dflt( glb ) if dflt is not None: return dflt return '' def __wprojplanes( self, glb ): if 'wprojplanes' in glb: return glb['wprojplanes'] dflt = self.__wprojplanes_dflt( glb ) if dflt is not None: return dflt return int(1) def __vptable( self, glb ): if 'vptable' in glb: return glb['vptable'] dflt = self.__vptable_dflt( glb ) if dflt is not None: return dflt return '' def __mosweight( self, glb ): if 'mosweight' in glb: return glb['mosweight'] dflt = self.__mosweight_dflt( glb ) if dflt is not None: return dflt return True def __aterm( self, glb ): if 'aterm' in glb: return glb['aterm'] dflt = self.__aterm_dflt( glb ) if dflt is not None: return dflt return True def __psterm( self, glb ): if 'psterm' in glb: return glb['psterm'] dflt = self.__psterm_dflt( glb ) if dflt is not None: return dflt return False def __wbawp( self, glb ): if 'wbawp' in glb: return glb['wbawp'] dflt = self.__wbawp_dflt( glb ) if dflt is not None: return dflt return True def __conjbeams( self, glb ): if 'conjbeams' in glb: return glb['conjbeams'] dflt = self.__conjbeams_dflt( glb ) if dflt is not None: return dflt return False def __cfcache( self, glb ): if 'cfcache' in glb: return glb['cfcache'] dflt = self.__cfcache_dflt( glb ) if dflt is not None: return dflt return '' def __usepointing( self, glb ): if 'usepointing' in glb: return glb['usepointing'] dflt = self.__usepointing_dflt( glb ) if dflt is not None: return dflt return False def __computepastep( self, glb ): if 'computepastep' in glb: return glb['computepastep'] dflt = self.__computepastep_dflt( glb ) if dflt is not None: 
return dflt return float(360.0) def __rotatepastep( self, glb ): if 'rotatepastep' in glb: return glb['rotatepastep'] dflt = self.__rotatepastep_dflt( glb ) if dflt is not None: return dflt return float(360.0) def __pointingoffsetsigdev( self, glb ): if 'pointingoffsetsigdev' in glb: return glb['pointingoffsetsigdev'] dflt = self.__pointingoffsetsigdev_dflt( glb ) if dflt is not None: return dflt return [ ] def __pblimit( self, glb ): if 'pblimit' in glb: return glb['pblimit'] dflt = self.__pblimit_dflt( glb ) if dflt is not None: return dflt return float(0.2) def __normtype( self, glb ): if 'normtype' in glb: return glb['normtype'] dflt = self.__normtype_dflt( glb ) if dflt is not None: return dflt return 'flatnoise' def __scales( self, glb ): if 'scales' in glb: return glb['scales'] dflt = self.__scales_dflt( glb ) if dflt is not None: return dflt return [ ] def __nterms( self, glb ): if 'nterms' in glb: return glb['nterms'] dflt = self.__nterms_dflt( glb ) if dflt is not None: return dflt return int(2) def __smallscalebias( self, glb ): if 'smallscalebias' in glb: return glb['smallscalebias'] dflt = self.__smallscalebias_dflt( glb ) if dflt is not None: return dflt return float(0.0) def __restoringbeam( self, glb ): if 'restoringbeam' in glb: return glb['restoringbeam'] dflt = self.__restoringbeam_dflt( glb ) if dflt is not None: return dflt return [ ] def __pbcor( self, glb ): if 'pbcor' in glb: return glb['pbcor'] dflt = self.__pbcor_dflt( glb ) if dflt is not None: return dflt return False def __robust( self, glb ): if 'robust' in glb: return glb['robust'] dflt = self.__robust_dflt( glb ) if dflt is not None: return dflt return float(0.5) def __noise( self, glb ): if 'noise' in glb: return glb['noise'] dflt = self.__noise_dflt( glb ) if dflt is not None: return dflt return '1.0Jy' def __npixels( self, glb ): if 'npixels' in glb: return glb['npixels'] dflt = self.__npixels_dflt( glb ) if dflt is not None: return dflt return int(0) def __uvtaper( self, glb ): if 'uvtaper' in glb: return glb['uvtaper'] dflt = self.__uvtaper_dflt( glb ) if dflt is not None: return dflt return [ '' ] def __gain( self, glb ): if 'gain' in glb: return glb['gain'] dflt = self.__gain_dflt( glb ) if dflt is not None: return dflt return float(0.1) def __threshold( self, glb ): if 'threshold' in glb: return glb['threshold'] dflt = self.__threshold_dflt( glb ) if dflt is not None: return dflt return float(0.0) def __nsigma( self, glb ): if 'nsigma' in glb: return glb['nsigma'] dflt = self.__nsigma_dflt( glb ) if dflt is not None: return dflt return float(0.0) def __cycleniter( self, glb ): if 'cycleniter' in glb: return glb['cycleniter'] dflt = self.__cycleniter_dflt( glb ) if dflt is not None: return dflt return int(-1) def __cyclefactor( self, glb ): if 'cyclefactor' in glb: return glb['cyclefactor'] dflt = self.__cyclefactor_dflt( glb ) if dflt is not None: return dflt return float(1.0) def __minpsffraction( self, glb ): if 'minpsffraction' in glb: return glb['minpsffraction'] dflt = self.__minpsffraction_dflt( glb ) if dflt is not None: return dflt return float(0.05) def __maxpsffraction( self, glb ): if 'maxpsffraction' in glb: return glb['maxpsffraction'] dflt = self.__maxpsffraction_dflt( glb ) if dflt is not None: return dflt return float(0.8) def __interactive( self, glb ): if 'interactive' in glb: return glb['interactive'] dflt = self.__interactive_dflt( glb ) if dflt is not None: return dflt return False def __mask( self, glb ): if 'mask' in glb: return glb['mask'] dflt = self.__mask_dflt( glb ) if 
dflt is not None: return dflt return '' def __pbmask( self, glb ): if 'pbmask' in glb: return glb['pbmask'] dflt = self.__pbmask_dflt( glb ) if dflt is not None: return dflt return float(0.0) def __sidelobethreshold( self, glb ): if 'sidelobethreshold' in glb: return glb['sidelobethreshold'] dflt = self.__sidelobethreshold_dflt( glb ) if dflt is not None: return dflt return float(3.0) def __noisethreshold( self, glb ): if 'noisethreshold' in glb: return glb['noisethreshold'] dflt = self.__noisethreshold_dflt( glb ) if dflt is not None: return dflt return float(5.0) def __lownoisethreshold( self, glb ): if 'lownoisethreshold' in glb: return glb['lownoisethreshold'] dflt = self.__lownoisethreshold_dflt( glb ) if dflt is not None: return dflt return float(1.5) def __negativethreshold( self, glb ): if 'negativethreshold' in glb: return glb['negativethreshold'] dflt = self.__negativethreshold_dflt( glb ) if dflt is not None: return dflt return float(0.0) def __smoothfactor( self, glb ): if 'smoothfactor' in glb: return glb['smoothfactor'] dflt = self.__smoothfactor_dflt( glb ) if dflt is not None: return dflt return float(1.0) def __minbeamfrac( self, glb ): if 'minbeamfrac' in glb: return glb['minbeamfrac'] dflt = self.__minbeamfrac_dflt( glb ) if dflt is not None: return dflt return float(0.3) def __cutthreshold( self, glb ): if 'cutthreshold' in glb: return glb['cutthreshold'] dflt = self.__cutthreshold_dflt( glb ) if dflt is not None: return dflt return float(0.01) def __growiterations( self, glb ): if 'growiterations' in glb: return glb['growiterations'] dflt = self.__growiterations_dflt( glb ) if dflt is not None: return dflt return int(75) def __dogrowprune( self, glb ): if 'dogrowprune' in glb: return glb['dogrowprune'] dflt = self.__dogrowprune_dflt( glb ) if dflt is not None: return dflt return True def __minpercentchange( self, glb ): if 'minpercentchange' in glb: return glb['minpercentchange'] dflt = self.__minpercentchange_dflt( glb ) if dflt is not None: return dflt return float(-1.0) def __verbose( self, glb ): if 'verbose' in glb: return glb['verbose'] dflt = self.__verbose_dflt( glb ) if dflt is not None: return dflt return False def __psfcutoff( self, glb ): if 'psfcutoff' in glb: return glb['psfcutoff'] dflt = self.__psfcutoff_dflt( glb ) if dflt is not None: return dflt return float(0.35) #--------- subparam inp output ---------------------------------------------------- def __vis_inp(self): description = 'Name of input visibility file(s)' value = self.__vis( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'vis': value},{'vis': self.__schema['vis']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('vis',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __imageprefix_inp(self): description = '' value = self.__imageprefix( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'imageprefix': value},{'imageprefix': self.__schema['imageprefix']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imageprefix',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __imagesuffix_inp(self): description = '' value = self.__imagesuffix( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'imagesuffix': value},{'imagesuffix': self.__schema['imagesuffix']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imagesuffix',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __ncpu_inp(self): description = '' value = 
self.__ncpu( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'ncpu': value},{'ncpu': self.__schema['ncpu']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('ncpu',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __twidth_inp(self): description = '' value = self.__twidth( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'twidth': value},{'twidth': self.__schema['twidth']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('twidth',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __doreg_inp(self): description = '' value = self.__doreg( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'doreg': value},{'doreg': self.__schema['doreg']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('doreg',pre,self.__to_string_(value),post),description,13+len(pre)+len(post)) def __usephacenter_inp(self): if self.__usephacenter_dflt( self.__globals_( ) ) is not None: description = '' value = self.__usephacenter( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'usephacenter': value},{'usephacenter': self.__schema['usephacenter']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('usephacenter',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __reftime_inp(self): if self.__reftime_dflt( self.__globals_( ) ) is not None: description = '' value = self.__reftime( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'reftime': value},{'reftime': self.__schema['reftime']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('reftime',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __toTb_inp(self): if self.__toTb_dflt( self.__globals_( ) ) is not None: description = '' value = self.__toTb( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'toTb': value},{'toTb': self.__schema['toTb']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('toTb',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __sclfactor_inp(self): if self.__sclfactor_dflt( self.__globals_( ) ) is not None: description = '' value = self.__sclfactor( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'sclfactor': value},{'sclfactor': self.__schema['sclfactor']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('sclfactor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __subregion_inp(self): if self.__subregion_dflt( self.__globals_( ) ) is not None: description = 'The name of a CASA region string' value = self.__subregion( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'subregion': value},{'subregion': self.__schema['subregion']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('subregion',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __docompress_inp(self): if self.__docompress_dflt( self.__globals_( ) ) is not None: description = '' value = self.__docompress( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'docompress': value},{'docompress': self.__schema['docompress']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('docompress',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def 
__overwrite_inp(self): description = '' value = self.__overwrite( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'overwrite': value},{'overwrite': self.__schema['overwrite']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('overwrite',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __selectdata_inp(self): description = 'Enable data selection parameters' value = self.__selectdata( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'selectdata': value},{'selectdata': self.__schema['selectdata']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('selectdata',pre,self.__to_string_(value),post),description,13+len(pre)+len(post)) def __field_inp(self): if self.__field_dflt( self.__globals_( ) ) is not None: description = 'field(s) to select' value = self.__field( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'field': value},{'field': self.__schema['field']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('field',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __spw_inp(self): if self.__spw_dflt( self.__globals_( ) ) is not None: description = 'spw(s)/channels to select' value = self.__spw( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'spw': value},{'spw': self.__schema['spw']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('spw',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __timerange_inp(self): if self.__timerange_dflt( self.__globals_( ) ) is not None: description = 'Range of time to select from data' value = self.__timerange( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'timerange': value},{'timerange': self.__schema['timerange']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('timerange',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __uvrange_inp(self): if self.__uvrange_dflt( self.__globals_( ) ) is not None: description = 'Select data within uvrange' value = self.__uvrange( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'uvrange': value},{'uvrange': self.__schema['uvrange']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('uvrange',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __antenna_inp(self): if self.__antenna_dflt( self.__globals_( ) ) is not None: description = 'Select data based on antenna/baseline' value = self.__antenna( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'antenna': value},{'antenna': self.__schema['antenna']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('antenna',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __scan_inp(self): if self.__scan_dflt( self.__globals_( ) ) is not None: description = 'Scan number range' value = self.__scan( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'scan': value},{'scan': self.__schema['scan']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('scan',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __observation_inp(self): if self.__observation_dflt( self.__globals_( ) ) is not None: description = 'Observation ID range' value = self.__observation( self.__globals_( ) ) (pre,post) = ('','') if 
self.__validate_({'observation': value},{'observation': self.__schema['observation']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('observation',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __intent_inp(self): if self.__intent_dflt( self.__globals_( ) ) is not None: description = 'Scan Intent(s)' value = self.__intent( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'intent': value},{'intent': self.__schema['intent']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('intent',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __datacolumn_inp(self): description = 'Data column to image(data,corrected)' value = self.__datacolumn( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'datacolumn': value},{'datacolumn': self.__schema['datacolumn']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('datacolumn',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __imagename_inp(self): description = 'Pre-name of output images' value = self.__imagename( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'imagename': value},{'imagename': self.__schema['imagename']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imagename',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __imsize_inp(self): description = 'Number of pixels' value = self.__imsize( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'imsize': value},{'imsize': self.__schema['imsize']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imsize',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __cell_inp(self): description = 'Cell size' value = self.__cell( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'cell': value},{'cell': self.__schema['cell']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('cell',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __phasecenter_inp(self): description = 'Phase center of the image' value = self.__phasecenter( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'phasecenter': value},{'phasecenter': self.__schema['phasecenter']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('phasecenter',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __stokes_inp(self): description = 'Stokes Planes to make' value = self.__stokes( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'stokes': value},{'stokes': self.__schema['stokes']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('stokes',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __projection_inp(self): description = 'Coordinate projection' value = self.__projection( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'projection': value},{'projection': self.__schema['projection']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('projection',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __startmodel_inp(self): description = 'Name of starting model image' value = self.__startmodel( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'startmodel': value},{'startmodel': self.__schema['startmodel']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = 
%s%-23s%s' % ('startmodel',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __specmode_inp(self): description = 'Spectral definition mode (mfs,cube,cubedata, cubesource)' value = self.__specmode( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'specmode': value},{'specmode': self.__schema['specmode']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('specmode',pre,self.__to_string_(value),post),description,13+len(pre)+len(post)) def __reffreq_inp(self): if self.__reffreq_dflt( self.__globals_( ) ) is not None: description = 'Reference frequency' value = self.__reffreq( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'reffreq': value},{'reffreq': self.__schema['reffreq']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('reffreq',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __nchan_inp(self): if self.__nchan_dflt( self.__globals_( ) ) is not None: description = 'Number of channels in the output image' value = self.__nchan( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'nchan': value},{'nchan': self.__schema['nchan']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('nchan',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __start_inp(self): if self.__start_dflt( self.__globals_( ) ) is not None: description = 'First channel (e.g. start=3,start=\'1.1GHz\',start=\'15343km/s\')' value = self.__start( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'start': value},{'start': self.__schema['start']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('start',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __width_inp(self): if self.__width_dflt( self.__globals_( ) ) is not None: description = 'Channel width (e.g. 
width=2,width=\'0.1MHz\',width=\'10km/s\')' value = self.__width( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'width': value},{'width': self.__schema['width']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('width',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __outframe_inp(self): if self.__outframe_dflt( self.__globals_( ) ) is not None: description = 'Spectral reference frame in which to interpret \'start\' and \'width\'' value = self.__outframe( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'outframe': value},{'outframe': self.__schema['outframe']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('outframe',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __veltype_inp(self): if self.__veltype_dflt( self.__globals_( ) ) is not None: description = 'Velocity type (radio, z, ratio, beta, gamma, optical)' value = self.__veltype( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'veltype': value},{'veltype': self.__schema['veltype']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('veltype',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __restfreq_inp(self): if self.__restfreq_dflt( self.__globals_( ) ) is not None: description = 'List of rest frequencies' value = self.__restfreq( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'restfreq': value},{'restfreq': self.__schema['restfreq']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('restfreq',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __interpolation_inp(self): if self.__interpolation_dflt( self.__globals_( ) ) is not None: description = 'Spectral interpolation (nearest,linear,cubic)' value = self.__interpolation( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'interpolation': value},{'interpolation': self.__schema['interpolation']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('interpolation',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __perchanweightdensity_inp(self): if self.__perchanweightdensity_dflt( self.__globals_( ) ) is not None: description = 'whether to calculate weight density per channel in Briggs style weighting or not' value = self.__perchanweightdensity( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'perchanweightdensity': value},{'perchanweightdensity': self.__schema['perchanweightdensity']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('perchanweightdensity',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __gridder_inp(self): description = 'Gridding options (standard, wproject, widefield, mosaic, awproject)' value = self.__gridder( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'gridder': value},{'gridder': self.__schema['gridder']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('gridder',pre,self.__to_string_(value),post),description,13+len(pre)+len(post)) def __facets_inp(self): if self.__facets_dflt( self.__globals_( ) ) is not None: description = 'Number of facets on a side' value = self.__facets( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'facets': value},{'facets': self.__schema['facets']}) else 
('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('facets',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __psfphasecenter_inp(self): if self.__psfphasecenter_dflt( self.__globals_( ) ) is not None: description = 'optional direction to calculate psf for mosaic (default is image phasecenter)' value = self.__psfphasecenter( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'psfphasecenter': value},{'psfphasecenter': self.__schema['psfphasecenter']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('psfphasecenter',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __wprojplanes_inp(self): if self.__wprojplanes_dflt( self.__globals_( ) ) is not None: description = 'Number of distinct w-values for convolution functions' value = self.__wprojplanes( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'wprojplanes': value},{'wprojplanes': self.__schema['wprojplanes']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('wprojplanes',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __vptable_inp(self): if self.__vptable_dflt( self.__globals_( ) ) is not None: description = 'Name of Voltage Pattern table' value = self.__vptable( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'vptable': value},{'vptable': self.__schema['vptable']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('vptable',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __mosweight_inp(self): if self.__mosweight_dflt( self.__globals_( ) ) is not None: description = 'Indepently weight each field in a mosaic' value = self.__mosweight( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'mosweight': value},{'mosweight': self.__schema['mosweight']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('mosweight',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __aterm_inp(self): if self.__aterm_dflt( self.__globals_( ) ) is not None: description = 'Use aperture illumination functions during gridding' value = self.__aterm( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'aterm': value},{'aterm': self.__schema['aterm']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('aterm',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __psterm_inp(self): if self.__psterm_dflt( self.__globals_( ) ) is not None: description = 'Use prolate spheroidal during gridding' value = self.__psterm( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'psterm': value},{'psterm': self.__schema['psterm']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('psterm',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __wbawp_inp(self): if self.__wbawp_dflt( self.__globals_( ) ) is not None: description = 'Use wideband A-terms' value = self.__wbawp( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'wbawp': value},{'wbawp': self.__schema['wbawp']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('wbawp',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __conjbeams_inp(self): if self.__conjbeams_dflt( self.__globals_( ) ) is not None: description = 'Use conjugate frequency 
for wideband A-terms' value = self.__conjbeams( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'conjbeams': value},{'conjbeams': self.__schema['conjbeams']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('conjbeams',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __cfcache_inp(self): if self.__cfcache_dflt( self.__globals_( ) ) is not None: description = 'Convolution function cache directory name' value = self.__cfcache( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'cfcache': value},{'cfcache': self.__schema['cfcache']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cfcache',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __usepointing_inp(self): if self.__usepointing_dflt( self.__globals_( ) ) is not None: description = 'The parameter makes the gridder utilize the pointing table phase directions while computing the residual image.' value = self.__usepointing( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'usepointing': value},{'usepointing': self.__schema['usepointing']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('usepointing',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __computepastep_inp(self): if self.__computepastep_dflt( self.__globals_( ) ) is not None: description = 'Parallactic angle interval after the AIFs are recomputed (deg)' value = self.__computepastep( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'computepastep': value},{'computepastep': self.__schema['computepastep']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('computepastep',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __rotatepastep_inp(self): if self.__rotatepastep_dflt( self.__globals_( ) ) is not None: description = 'Parallactic angle interval after which the nearest AIF is rotated (deg)' value = self.__rotatepastep( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'rotatepastep': value},{'rotatepastep': self.__schema['rotatepastep']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('rotatepastep',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __pointingoffsetsigdev_inp(self): if self.__pointingoffsetsigdev_dflt( self.__globals_( ) ) is not None: description = 'Pointing offset threshold to determine heterogeneity of pointing corrections for the AWProject gridder' value = self.__pointingoffsetsigdev( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'pointingoffsetsigdev': value},{'pointingoffsetsigdev': self.__schema['pointingoffsetsigdev']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pointingoffsetsigdev',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __pblimit_inp(self): if self.__pblimit_dflt( self.__globals_( ) ) is not None: description = 'PB gain level at which to cut off normalizations' value = self.__pblimit( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'pblimit': value},{'pblimit': self.__schema['pblimit']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pblimit',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __normtype_inp(self): if self.__normtype_dflt( self.__globals_( ) ) is not None: 
description = 'Normalization type (flatnoise, flatsky,pbsquare)' value = self.__normtype( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'normtype': value},{'normtype': self.__schema['normtype']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('normtype',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __deconvolver_inp(self): description = 'Minor cycle algorithm (hogbom,clark,multiscale,mtmfs,mem,clarkstokes)' value = self.__deconvolver( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'deconvolver': value},{'deconvolver': self.__schema['deconvolver']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('deconvolver',pre,self.__to_string_(value),post),description,13+len(pre)+len(post)) def __scales_inp(self): if self.__scales_dflt( self.__globals_( ) ) is not None: description = 'List of scale sizes (in pixels) for multi-scale algorithms' value = self.__scales( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'scales': value},{'scales': self.__schema['scales']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('scales',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __nterms_inp(self): if self.__nterms_dflt( self.__globals_( ) ) is not None: description = 'Number of Taylor coefficients in the spectral model' value = self.__nterms( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'nterms': value},{'nterms': self.__schema['nterms']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('nterms',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __smallscalebias_inp(self): if self.__smallscalebias_dflt( self.__globals_( ) ) is not None: description = 'Biases the scale selection when using multi-scale or mtmfs deconvolvers' value = self.__smallscalebias( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'smallscalebias': value},{'smallscalebias': self.__schema['smallscalebias']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('smallscalebias',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __restoration_inp(self): description = 'Do restoration steps (or not)' value = self.__restoration( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'restoration': value},{'restoration': self.__schema['restoration']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('restoration',pre,self.__to_string_(value),post),description,13+len(pre)+len(post)) def __restoringbeam_inp(self): if self.__restoringbeam_dflt( self.__globals_( ) ) is not None: description = 'Restoring beam shape to use. 
Default is the PSF main lobe' value = self.__restoringbeam( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'restoringbeam': value},{'restoringbeam': self.__schema['restoringbeam']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('restoringbeam',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __pbcor_inp(self): if self.__pbcor_dflt( self.__globals_( ) ) is not None: description = 'Apply PB correction on the output restored image' value = self.__pbcor( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'pbcor': value},{'pbcor': self.__schema['pbcor']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pbcor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __outlierfile_inp(self): description = 'Name of outlier-field image definitions' value = self.__outlierfile( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'outlierfile': value},{'outlierfile': self.__schema['outlierfile']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('outlierfile',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __weighting_inp(self): description = 'Weighting scheme (natural,uniform,briggs, briggsabs[experimental], briggsbwtaper[experimental])' value = self.__weighting( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'weighting': value},{'weighting': self.__schema['weighting']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('weighting',pre,self.__to_string_(value),post),description,13+len(pre)+len(post)) def __robust_inp(self): if self.__robust_dflt( self.__globals_( ) ) is not None: description = 'Robustness parameter' value = self.__robust( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'robust': value},{'robust': self.__schema['robust']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('robust',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __noise_inp(self): if self.__noise_dflt( self.__globals_( ) ) is not None: description = 'noise parameter for briggs abs mode weighting' value = self.__noise( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'noise': value},{'noise': self.__schema['noise']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('noise',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __npixels_inp(self): if self.__npixels_dflt( self.__globals_( ) ) is not None: description = 'Number of pixels to determine uv-cell size' value = self.__npixels( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'npixels': value},{'npixels': self.__schema['npixels']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('npixels',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __uvtaper_inp(self): if self.__uvtaper_dflt( self.__globals_( ) ) is not None: description = 'uv-taper on outer baselines in uv-plane' value = self.__uvtaper( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'uvtaper': value},{'uvtaper': self.__schema['uvtaper']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('uvtaper',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __niter_inp(self): description = 'Maximum number of iterations' value = 
self.__niter( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'niter': value},{'niter': self.__schema['niter']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('niter',pre,self.__to_string_(value),post),description,13+len(pre)+len(post)) def __gain_inp(self): if self.__gain_dflt( self.__globals_( ) ) is not None: description = 'Loop gain' value = self.__gain( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'gain': value},{'gain': self.__schema['gain']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('gain',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __threshold_inp(self): if self.__threshold_dflt( self.__globals_( ) ) is not None: description = 'Stopping threshold' value = self.__threshold( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'threshold': value},{'threshold': self.__schema['threshold']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('threshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __nsigma_inp(self): if self.__nsigma_dflt( self.__globals_( ) ) is not None: description = 'Multiplicative factor for rms-based threshold stopping' value = self.__nsigma( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'nsigma': value},{'nsigma': self.__schema['nsigma']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('nsigma',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __cycleniter_inp(self): if self.__cycleniter_dflt( self.__globals_( ) ) is not None: description = 'Maximum number of minor-cycle iterations' value = self.__cycleniter( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'cycleniter': value},{'cycleniter': self.__schema['cycleniter']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cycleniter',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __cyclefactor_inp(self): if self.__cyclefactor_dflt( self.__globals_( ) ) is not None: description = 'Scaling on PSF sidelobe level to compute the minor-cycle stopping threshold.' 
value = self.__cyclefactor( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'cyclefactor': value},{'cyclefactor': self.__schema['cyclefactor']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cyclefactor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __minpsffraction_inp(self): if self.__minpsffraction_dflt( self.__globals_( ) ) is not None: description = 'PSF fraction that marks the max depth of cleaning in the minor cycle' value = self.__minpsffraction( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'minpsffraction': value},{'minpsffraction': self.__schema['minpsffraction']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('minpsffraction',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __maxpsffraction_inp(self): if self.__maxpsffraction_dflt( self.__globals_( ) ) is not None: description = 'PSF fraction that marks the minimum depth of cleaning in the minor cycle' value = self.__maxpsffraction( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'maxpsffraction': value},{'maxpsffraction': self.__schema['maxpsffraction']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('maxpsffraction',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __interactive_inp(self): if self.__interactive_dflt( self.__globals_( ) ) is not None: description = 'Modify masks and parameters at runtime' value = self.__interactive( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'interactive': value},{'interactive': self.__schema['interactive']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('interactive',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __usemask_inp(self): description = 'Type of mask(s) for deconvolution: user, pb, or auto-multithresh' value = self.__usemask( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'usemask': value},{'usemask': self.__schema['usemask']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('usemask',pre,self.__to_string_(value),post),description,13+len(pre)+len(post)) def __mask_inp(self): if self.__mask_dflt( self.__globals_( ) ) is not None: description = 'Mask (a list of image name(s) or region file(s) or region string(s) )' value = self.__mask( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'mask': value},{'mask': self.__schema['mask']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('mask',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __pbmask_inp(self): if self.__pbmask_dflt( self.__globals_( ) ) is not None: description = 'primary beam mask' value = self.__pbmask( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'pbmask': value},{'pbmask': self.__schema['pbmask']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pbmask',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __sidelobethreshold_inp(self): if self.__sidelobethreshold_dflt( self.__globals_( ) ) is not None: description = 'sidelobethreshold * the max sidelobe level * peak residual' value = self.__sidelobethreshold( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'sidelobethreshold': value},{'sidelobethreshold': self.__schema['sidelobethreshold']}) 
else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('sidelobethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __noisethreshold_inp(self): if self.__noisethreshold_dflt( self.__globals_( ) ) is not None: description = 'noisethreshold * rms in residual image + location(median)' value = self.__noisethreshold( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'noisethreshold': value},{'noisethreshold': self.__schema['noisethreshold']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('noisethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __lownoisethreshold_inp(self): if self.__lownoisethreshold_dflt( self.__globals_( ) ) is not None: description = 'lownoisethreshold * rms in residual image + location(median)' value = self.__lownoisethreshold( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'lownoisethreshold': value},{'lownoisethreshold': self.__schema['lownoisethreshold']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('lownoisethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __negativethreshold_inp(self): if self.__negativethreshold_dflt( self.__globals_( ) ) is not None: description = 'negativethreshold * rms in residual image + location(median)' value = self.__negativethreshold( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'negativethreshold': value},{'negativethreshold': self.__schema['negativethreshold']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('negativethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __smoothfactor_inp(self): if self.__smoothfactor_dflt( self.__globals_( ) ) is not None: description = 'smoothing factor in a unit of the beam' value = self.__smoothfactor( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'smoothfactor': value},{'smoothfactor': self.__schema['smoothfactor']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('smoothfactor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __minbeamfrac_inp(self): if self.__minbeamfrac_dflt( self.__globals_( ) ) is not None: description = 'minimum beam fraction for pruning' value = self.__minbeamfrac( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'minbeamfrac': value},{'minbeamfrac': self.__schema['minbeamfrac']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('minbeamfrac',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __cutthreshold_inp(self): if self.__cutthreshold_dflt( self.__globals_( ) ) is not None: description = 'threshold to cut the smoothed mask to create a final mask' value = self.__cutthreshold( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'cutthreshold': value},{'cutthreshold': self.__schema['cutthreshold']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cutthreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __growiterations_inp(self): if self.__growiterations_dflt( self.__globals_( ) ) is not None: description = 'number of binary dilation iterations for growing the mask' value = self.__growiterations( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'growiterations': 
value},{'growiterations': self.__schema['growiterations']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('growiterations',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __dogrowprune_inp(self): if self.__dogrowprune_dflt( self.__globals_( ) ) is not None: description = 'Do pruning on the grow mask' value = self.__dogrowprune( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'dogrowprune': value},{'dogrowprune': self.__schema['dogrowprune']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('dogrowprune',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __minpercentchange_inp(self): if self.__minpercentchange_dflt( self.__globals_( ) ) is not None: description = 'minimum percentage change in mask size (per channel plane) to trigger updating of mask by automask' value = self.__minpercentchange( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'minpercentchange': value},{'minpercentchange': self.__schema['minpercentchange']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('minpercentchange',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __verbose_inp(self): if self.__verbose_dflt( self.__globals_( ) ) is not None: description = 'True: print more automasking information in the logger' value = self.__verbose( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'verbose': value},{'verbose': self.__schema['verbose']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('verbose',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __fastnoise_inp(self): description = 'True: use the faster (old) noise calculation. False: use the new improved noise calculations' value = self.__fastnoise( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'fastnoise': value},{'fastnoise': self.__schema['fastnoise']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('fastnoise',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __restart_inp(self): description = 'True : Re-use existing images. 
False : Increment imagename' value = self.__restart( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'restart': value},{'restart': self.__schema['restart']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('restart',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __savemodel_inp(self): description = 'Options to save model visibilities (none, virtual, modelcolumn)' value = self.__savemodel( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'savemodel': value},{'savemodel': self.__schema['savemodel']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('savemodel',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __calcres_inp(self): description = 'Calculate initial residual image' value = self.__calcres( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'calcres': value},{'calcres': self.__schema['calcres']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('calcres',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __calcpsf_inp(self): description = 'Calculate PSF' value = self.__calcpsf( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'calcpsf': value},{'calcpsf': self.__schema['calcpsf']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('calcpsf',pre,self.__to_string_(value),post),description,13+len(pre)+len(post)) def __psfcutoff_inp(self): if self.__psfcutoff_dflt( self.__globals_( ) ) is not None: description = 'All pixels in the main lobe of the PSF above psfcutoff are used to fit a Gaussian beam (the Clean beam).' value = self.__psfcutoff( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'psfcutoff': value},{'psfcutoff': self.__schema['psfcutoff']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output(' \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('psfcutoff',pre,self.__to_string_(value),post),description,9+len(pre)+len(post)) def __parallel_inp(self): description = 'Run major cycles in parallel' value = self.__parallel( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'parallel': value},{'parallel': self.__schema['parallel']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('parallel',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) #--------- global default implementation------------------------------------------- @static_var('state', __sf__('casa_inp_go_state')) def set_global_defaults(self): self.set_global_defaults.state['last'] = self glb = self.__globals_( ) if 'antenna' in glb: del glb['antenna'] if 'smoothfactor' in glb: del glb['smoothfactor'] if 'stokes' in glb: del glb['stokes'] if 'negativethreshold' in glb: del glb['negativethreshold'] if 'deconvolver' in glb: del glb['deconvolver'] if 'minbeamfrac' in glb: del glb['minbeamfrac'] if 'doreg' in glb: del glb['doreg'] if 'savemodel' in glb: del glb['savemodel'] if 'psfphasecenter' in glb: del glb['psfphasecenter'] if 'mask' in glb: del glb['mask'] if 'sclfactor' in glb: del glb['sclfactor'] if 'field' in glb: del glb['field'] if 'cutthreshold' in glb: del glb['cutthreshold'] if 'projection' in glb: del glb['projection'] if 'pblimit' in glb: del glb['pblimit'] if 'smallscalebias' in glb: del glb['smallscalebias'] if 'maxpsffraction' in glb: del glb['maxpsffraction'] if 'datacolumn' in glb: del glb['datacolumn'] if 'verbose' in glb: del glb['verbose'] if 'weighting' in glb: del glb['weighting'] if 'intent' in 
glb: del glb['intent'] if 'noise' in glb: del glb['noise'] if 'interpolation' in glb: del glb['interpolation'] if 'subregion' in glb: del glb['subregion'] if 'nterms' in glb: del glb['nterms'] if 'pointingoffsetsigdev' in glb: del glb['pointingoffsetsigdev'] if 'nchan' in glb: del glb['nchan'] if 'reffreq' in glb: del glb['reffreq'] if 'conjbeams' in glb: del glb['conjbeams'] if 'restoringbeam' in glb: del glb['restoringbeam'] if 'sidelobethreshold' in glb: del glb['sidelobethreshold'] if 'reftime' in glb: del glb['reftime'] if 'gridder' in glb: del glb['gridder'] if 'cycleniter' in glb: del glb['cycleniter'] if 'imagename' in glb: del glb['imagename'] if 'minpsffraction' in glb: del glb['minpsffraction'] if 'imsize' in glb: del glb['imsize'] if 'scan' in glb: del glb['scan'] if 'vis' in glb: del glb['vis'] if 'outlierfile' in glb: del glb['outlierfile'] if 'computepastep' in glb: del glb['computepastep'] if 'minpercentchange' in glb: del glb['minpercentchange'] if 'fastnoise' in glb: del glb['fastnoise'] if 'wbawp' in glb: del glb['wbawp'] if 'docompress' in glb: del glb['docompress'] if 'interactive' in glb: del glb['interactive'] if 'specmode' in glb: del glb['specmode'] if 'npixels' in glb: del glb['npixels'] if 'mosweight' in glb: del glb['mosweight'] if 'pbcor' in glb: del glb['pbcor'] if 'calcres' in glb: del glb['calcres'] if 'normtype' in glb: del glb['normtype'] if 'uvtaper' in glb: del glb['uvtaper'] if 'cyclefactor' in glb: del glb['cyclefactor'] if 'toTb' in glb: del glb['toTb'] if 'restfreq' in glb: del glb['restfreq'] if 'imageprefix' in glb: del glb['imageprefix'] if 'pbmask' in glb: del glb['pbmask'] if 'growiterations' in glb: del glb['growiterations'] if 'gain' in glb: del glb['gain'] if 'scales' in glb: del glb['scales'] if 'twidth' in glb: del glb['twidth'] if 'psfcutoff' in glb: del glb['psfcutoff'] if 'robust' in glb: del glb['robust'] if 'vptable' in glb: del glb['vptable'] if 'perchanweightdensity' in glb: del glb['perchanweightdensity'] if 'aterm' in glb: del glb['aterm'] if 'imagesuffix' in glb: del glb['imagesuffix'] if 'usephacenter' in glb: del glb['usephacenter'] if 'usepointing' in glb: del glb['usepointing'] if 'rotatepastep' in glb: del glb['rotatepastep'] if 'threshold' in glb: del glb['threshold'] if 'ncpu' in glb: del glb['ncpu'] if 'veltype' in glb: del glb['veltype'] if 'calcpsf' in glb: del glb['calcpsf'] if 'usemask' in glb: del glb['usemask'] if 'restoration' in glb: del glb['restoration'] if 'niter' in glb: del glb['niter'] if 'outframe' in glb: del glb['outframe'] if 'dogrowprune' in glb: del glb['dogrowprune'] if 'cell' in glb: del glb['cell'] if 'uvrange' in glb: del glb['uvrange'] if 'psterm' in glb: del glb['psterm'] if 'phasecenter' in glb: del glb['phasecenter'] if 'overwrite' in glb: del glb['overwrite'] if 'restart' in glb: del glb['restart'] if 'start' in glb: del glb['start'] if 'observation' in glb: del glb['observation'] if 'lownoisethreshold' in glb: del glb['lownoisethreshold'] if 'facets' in glb: del glb['facets'] if 'noisethreshold' in glb: del glb['noisethreshold'] if 'width' in glb: del glb['width'] if 'spw' in glb: del glb['spw'] if 'selectdata' in glb: del glb['selectdata'] if 'timerange' in glb: del glb['timerange'] if 'parallel' in glb: del glb['parallel'] if 'nsigma' in glb: del glb['nsigma'] if 'cfcache' in glb: del glb['cfcache'] if 'wprojplanes' in glb: del glb['wprojplanes'] if 'startmodel' in glb: del glb['startmodel'] #--------- inp function ----------------------------------------------------------- def inp(self): 
print("# ptclean6 -- %s" % self._info_desc_) self.term_width, self.term_height = shutil.get_terminal_size(fallback=(80, 24)) self.__vis_inp( ) self.__imageprefix_inp( ) self.__imagesuffix_inp( ) self.__ncpu_inp( ) self.__twidth_inp( ) self.__doreg_inp( ) self.__usephacenter_inp( ) self.__reftime_inp( ) self.__toTb_inp( ) self.__sclfactor_inp( ) self.__subregion_inp( ) self.__docompress_inp( ) self.__overwrite_inp( ) self.__selectdata_inp( ) self.__field_inp( ) self.__spw_inp( ) self.__timerange_inp( ) self.__uvrange_inp( ) self.__antenna_inp( ) self.__scan_inp( ) self.__observation_inp( ) self.__intent_inp( ) self.__datacolumn_inp( ) self.__imagename_inp( ) self.__imsize_inp( ) self.__cell_inp( ) self.__phasecenter_inp( ) self.__stokes_inp( ) self.__projection_inp( ) self.__startmodel_inp( ) self.__specmode_inp( ) self.__reffreq_inp( ) self.__nchan_inp( ) self.__start_inp( ) self.__width_inp( ) self.__outframe_inp( ) self.__veltype_inp( ) self.__restfreq_inp( ) self.__interpolation_inp( ) self.__perchanweightdensity_inp( ) self.__gridder_inp( ) self.__facets_inp( ) self.__psfphasecenter_inp( ) self.__wprojplanes_inp( ) self.__vptable_inp( ) self.__mosweight_inp( ) self.__aterm_inp( ) self.__psterm_inp( ) self.__wbawp_inp( ) self.__conjbeams_inp( ) self.__cfcache_inp( ) self.__usepointing_inp( ) self.__computepastep_inp( ) self.__rotatepastep_inp( ) self.__pointingoffsetsigdev_inp( ) self.__pblimit_inp( ) self.__normtype_inp( ) self.__deconvolver_inp( ) self.__scales_inp( ) self.__nterms_inp( ) self.__smallscalebias_inp( ) self.__restoration_inp( ) self.__restoringbeam_inp( ) self.__pbcor_inp( ) self.__outlierfile_inp( ) self.__weighting_inp( ) self.__robust_inp( ) self.__noise_inp( ) self.__npixels_inp( ) self.__uvtaper_inp( ) self.__niter_inp( ) self.__gain_inp( ) self.__threshold_inp( ) self.__nsigma_inp( ) self.__cycleniter_inp( ) self.__cyclefactor_inp( ) self.__minpsffraction_inp( ) self.__maxpsffraction_inp( ) self.__interactive_inp( ) self.__usemask_inp( ) self.__mask_inp( ) self.__pbmask_inp( ) self.__sidelobethreshold_inp( ) self.__noisethreshold_inp( ) self.__lownoisethreshold_inp( ) self.__negativethreshold_inp( ) self.__smoothfactor_inp( ) self.__minbeamfrac_inp( ) self.__cutthreshold_inp( ) self.__growiterations_inp( ) self.__dogrowprune_inp( ) self.__minpercentchange_inp( ) self.__verbose_inp( ) self.__fastnoise_inp( ) self.__restart_inp( ) self.__savemodel_inp( ) self.__calcres_inp( ) self.__calcpsf_inp( ) self.__psfcutoff_inp( ) self.__parallel_inp( ) #--------- tget function ---------------------------------------------------------- @static_var('state', __sf__('casa_inp_go_state')) def tget(self,file=None): from casashell.private.stack_manip import find_frame from runpy import run_path filename = None if file is None: if os.path.isfile("ptclean6.last"): filename = "ptclean6.last" elif isinstance(file, str): if os.path.isfile(file): filename = file if filename is not None: glob = find_frame( ) newglob = run_path( filename, init_globals={ } ) for i in newglob: glob[i] = newglob[i] self.tget.state['last'] = self else: print("could not find last file, setting defaults instead...") self.set_global_defaults( ) def __call__( self, vis=None, imageprefix=None, imagesuffix=None, ncpu=None, twidth=None, doreg=None, usephacenter=None, reftime=None, toTb=None, sclfactor=None, subregion=None, docompress=None, overwrite=None, selectdata=None, field=None, spw=None, timerange=None, uvrange=None, antenna=None, scan=None, observation=None, intent=None, datacolumn=None, imagename=None, 
imsize=None, cell=None, phasecenter=None, stokes=None, projection=None, startmodel=None, specmode=None, reffreq=None, nchan=None, start=None, width=None, outframe=None, veltype=None, restfreq=None, interpolation=None, perchanweightdensity=None, gridder=None, facets=None, psfphasecenter=None, wprojplanes=None, vptable=None, mosweight=None, aterm=None, psterm=None, wbawp=None, conjbeams=None, cfcache=None, usepointing=None, computepastep=None, rotatepastep=None, pointingoffsetsigdev=None, pblimit=None, normtype=None, deconvolver=None, scales=None, nterms=None, smallscalebias=None, restoration=None, restoringbeam=None, pbcor=None, outlierfile=None, weighting=None, robust=None, noise=None, npixels=None, uvtaper=None, niter=None, gain=None, threshold=None, nsigma=None, cycleniter=None, cyclefactor=None, minpsffraction=None, maxpsffraction=None, interactive=None, usemask=None, mask=None, pbmask=None, sidelobethreshold=None, noisethreshold=None, lownoisethreshold=None, negativethreshold=None, smoothfactor=None, minbeamfrac=None, cutthreshold=None, growiterations=None, dogrowprune=None, minpercentchange=None, verbose=None, fastnoise=None, restart=None, savemodel=None, calcres=None, calcpsf=None, psfcutoff=None, parallel=None ): def noobj(s): if s.startswith('<') and s.endswith('>'): return "None" else: return s _prefile = os.path.realpath('ptclean6.pre') _postfile = os.path.realpath('ptclean6.last') _return_result_ = None _arguments = [vis,imageprefix,imagesuffix,ncpu,twidth,doreg,usephacenter,reftime,toTb,sclfactor,subregion,docompress,overwrite,selectdata,field,spw,timerange,uvrange,antenna,scan,observation,intent,datacolumn,imagename,imsize,cell,phasecenter,stokes,projection,startmodel,specmode,reffreq,nchan,start,width,outframe,veltype,restfreq,interpolation,perchanweightdensity,gridder,facets,psfphasecenter,wprojplanes,vptable,mosweight,aterm,psterm,wbawp,conjbeams,cfcache,usepointing,computepastep,rotatepastep,pointingoffsetsigdev,pblimit,normtype,deconvolver,scales,nterms,smallscalebias,restoration,restoringbeam,pbcor,outlierfile,weighting,robust,noise,npixels,uvtaper,niter,gain,threshold,nsigma,cycleniter,cyclefactor,minpsffraction,maxpsffraction,interactive,usemask,mask,pbmask,sidelobethreshold,noisethreshold,lownoisethreshold,negativethreshold,smoothfactor,minbeamfrac,cutthreshold,growiterations,dogrowprune,minpercentchange,verbose,fastnoise,restart,savemodel,calcres,calcpsf,psfcutoff,parallel] _invocation_parameters = OrderedDict( ) if any(map(lambda x: x is not None,_arguments)): # invoke python style # set the non sub-parameters that are not None local_global = { } if vis is not None: local_global['vis'] = vis if imageprefix is not None: local_global['imageprefix'] = imageprefix if imagesuffix is not None: local_global['imagesuffix'] = imagesuffix if ncpu is not None: local_global['ncpu'] = ncpu if twidth is not None: local_global['twidth'] = twidth if doreg is not None: local_global['doreg'] = doreg if overwrite is not None: local_global['overwrite'] = overwrite if selectdata is not None: local_global['selectdata'] = selectdata if datacolumn is not None: local_global['datacolumn'] = datacolumn if imagename is not None: local_global['imagename'] = imagename if imsize is not None: local_global['imsize'] = imsize if cell is not None: local_global['cell'] = cell if phasecenter is not None: local_global['phasecenter'] = phasecenter if stokes is not None: local_global['stokes'] = stokes if projection is not None: local_global['projection'] = projection if startmodel is not None: 
local_global['startmodel'] = startmodel if specmode is not None: local_global['specmode'] = specmode if gridder is not None: local_global['gridder'] = gridder if deconvolver is not None: local_global['deconvolver'] = deconvolver if restoration is not None: local_global['restoration'] = restoration if outlierfile is not None: local_global['outlierfile'] = outlierfile if weighting is not None: local_global['weighting'] = weighting if niter is not None: local_global['niter'] = niter if usemask is not None: local_global['usemask'] = usemask if fastnoise is not None: local_global['fastnoise'] = fastnoise if restart is not None: local_global['restart'] = restart if savemodel is not None: local_global['savemodel'] = savemodel if calcres is not None: local_global['calcres'] = calcres if calcpsf is not None: local_global['calcpsf'] = calcpsf if parallel is not None: local_global['parallel'] = parallel # the invocation parameters for the non-subparameters can now be set - this picks up those defaults _invocation_parameters['vis'] = self.__vis( local_global ) _invocation_parameters['imageprefix'] = self.__imageprefix( local_global ) _invocation_parameters['imagesuffix'] = self.__imagesuffix( local_global ) _invocation_parameters['ncpu'] = self.__ncpu( local_global ) _invocation_parameters['twidth'] = self.__twidth( local_global ) _invocation_parameters['doreg'] = self.__doreg( local_global ) _invocation_parameters['overwrite'] = self.__overwrite( local_global ) _invocation_parameters['selectdata'] = self.__selectdata( local_global ) _invocation_parameters['datacolumn'] = self.__datacolumn( local_global ) _invocation_parameters['imagename'] = self.__imagename( local_global ) _invocation_parameters['imsize'] = self.__imsize( local_global ) _invocation_parameters['cell'] = self.__cell( local_global ) _invocation_parameters['phasecenter'] = self.__phasecenter( local_global ) _invocation_parameters['stokes'] = self.__stokes( local_global ) _invocation_parameters['projection'] = self.__projection( local_global ) _invocation_parameters['startmodel'] = self.__startmodel( local_global ) _invocation_parameters['specmode'] = self.__specmode( local_global ) _invocation_parameters['gridder'] = self.__gridder( local_global ) _invocation_parameters['deconvolver'] = self.__deconvolver( local_global ) _invocation_parameters['restoration'] = self.__restoration( local_global ) _invocation_parameters['outlierfile'] = self.__outlierfile( local_global ) _invocation_parameters['weighting'] = self.__weighting( local_global ) _invocation_parameters['niter'] = self.__niter( local_global ) _invocation_parameters['usemask'] = self.__usemask( local_global ) _invocation_parameters['fastnoise'] = self.__fastnoise( local_global ) _invocation_parameters['restart'] = self.__restart( local_global ) _invocation_parameters['savemodel'] = self.__savemodel( local_global ) _invocation_parameters['calcres'] = self.__calcres( local_global ) _invocation_parameters['calcpsf'] = self.__calcpsf( local_global ) _invocation_parameters['parallel'] = self.__parallel( local_global ) # the sub-parameters can then be set. 
Use the supplied value if not None, else the function, which gets the appropriate default _invocation_parameters['usephacenter'] = self.__usephacenter( _invocation_parameters ) if usephacenter is None else usephacenter _invocation_parameters['reftime'] = self.__reftime( _invocation_parameters ) if reftime is None else reftime _invocation_parameters['toTb'] = self.__toTb( _invocation_parameters ) if toTb is None else toTb _invocation_parameters['sclfactor'] = self.__sclfactor( _invocation_parameters ) if sclfactor is None else sclfactor _invocation_parameters['subregion'] = self.__subregion( _invocation_parameters ) if subregion is None else subregion _invocation_parameters['docompress'] = self.__docompress( _invocation_parameters ) if docompress is None else docompress _invocation_parameters['field'] = self.__field( _invocation_parameters ) if field is None else field _invocation_parameters['spw'] = self.__spw( _invocation_parameters ) if spw is None else spw _invocation_parameters['timerange'] = self.__timerange( _invocation_parameters ) if timerange is None else timerange _invocation_parameters['uvrange'] = self.__uvrange( _invocation_parameters ) if uvrange is None else uvrange _invocation_parameters['antenna'] = self.__antenna( _invocation_parameters ) if antenna is None else antenna _invocation_parameters['scan'] = self.__scan( _invocation_parameters ) if scan is None else scan _invocation_parameters['observation'] = self.__observation( _invocation_parameters ) if observation is None else observation _invocation_parameters['intent'] = self.__intent( _invocation_parameters ) if intent is None else intent _invocation_parameters['reffreq'] = self.__reffreq( _invocation_parameters ) if reffreq is None else reffreq _invocation_parameters['nchan'] = self.__nchan( _invocation_parameters ) if nchan is None else nchan _invocation_parameters['start'] = self.__start( _invocation_parameters ) if start is None else start _invocation_parameters['width'] = self.__width( _invocation_parameters ) if width is None else width _invocation_parameters['outframe'] = self.__outframe( _invocation_parameters ) if outframe is None else outframe _invocation_parameters['veltype'] = self.__veltype( _invocation_parameters ) if veltype is None else veltype _invocation_parameters['restfreq'] = self.__restfreq( _invocation_parameters ) if restfreq is None else restfreq _invocation_parameters['interpolation'] = self.__interpolation( _invocation_parameters ) if interpolation is None else interpolation _invocation_parameters['perchanweightdensity'] = self.__perchanweightdensity( _invocation_parameters ) if perchanweightdensity is None else perchanweightdensity _invocation_parameters['facets'] = self.__facets( _invocation_parameters ) if facets is None else facets _invocation_parameters['psfphasecenter'] = self.__psfphasecenter( _invocation_parameters ) if psfphasecenter is None else psfphasecenter _invocation_parameters['wprojplanes'] = self.__wprojplanes( _invocation_parameters ) if wprojplanes is None else wprojplanes _invocation_parameters['vptable'] = self.__vptable( _invocation_parameters ) if vptable is None else vptable _invocation_parameters['mosweight'] = self.__mosweight( _invocation_parameters ) if mosweight is None else mosweight _invocation_parameters['aterm'] = self.__aterm( _invocation_parameters ) if aterm is None else aterm _invocation_parameters['psterm'] = self.__psterm( _invocation_parameters ) if psterm is None else psterm _invocation_parameters['wbawp'] = self.__wbawp( _invocation_parameters ) if 
wbawp is None else wbawp _invocation_parameters['conjbeams'] = self.__conjbeams( _invocation_parameters ) if conjbeams is None else conjbeams _invocation_parameters['cfcache'] = self.__cfcache( _invocation_parameters ) if cfcache is None else cfcache _invocation_parameters['usepointing'] = self.__usepointing( _invocation_parameters ) if usepointing is None else usepointing _invocation_parameters['computepastep'] = self.__computepastep( _invocation_parameters ) if computepastep is None else computepastep _invocation_parameters['rotatepastep'] = self.__rotatepastep( _invocation_parameters ) if rotatepastep is None else rotatepastep _invocation_parameters['pointingoffsetsigdev'] = self.__pointingoffsetsigdev( _invocation_parameters ) if pointingoffsetsigdev is None else pointingoffsetsigdev _invocation_parameters['pblimit'] = self.__pblimit( _invocation_parameters ) if pblimit is None else pblimit _invocation_parameters['normtype'] = self.__normtype( _invocation_parameters ) if normtype is None else normtype _invocation_parameters['scales'] = self.__scales( _invocation_parameters ) if scales is None else scales _invocation_parameters['nterms'] = self.__nterms( _invocation_parameters ) if nterms is None else nterms _invocation_parameters['smallscalebias'] = self.__smallscalebias( _invocation_parameters ) if smallscalebias is None else smallscalebias _invocation_parameters['restoringbeam'] = self.__restoringbeam( _invocation_parameters ) if restoringbeam is None else restoringbeam _invocation_parameters['pbcor'] = self.__pbcor( _invocation_parameters ) if pbcor is None else pbcor _invocation_parameters['robust'] = self.__robust( _invocation_parameters ) if robust is None else robust _invocation_parameters['noise'] = self.__noise( _invocation_parameters ) if noise is None else noise _invocation_parameters['npixels'] = self.__npixels( _invocation_parameters ) if npixels is None else npixels _invocation_parameters['uvtaper'] = self.__uvtaper( _invocation_parameters ) if uvtaper is None else uvtaper _invocation_parameters['gain'] = self.__gain( _invocation_parameters ) if gain is None else gain _invocation_parameters['threshold'] = self.__threshold( _invocation_parameters ) if threshold is None else threshold _invocation_parameters['nsigma'] = self.__nsigma( _invocation_parameters ) if nsigma is None else nsigma _invocation_parameters['cycleniter'] = self.__cycleniter( _invocation_parameters ) if cycleniter is None else cycleniter _invocation_parameters['cyclefactor'] = self.__cyclefactor( _invocation_parameters ) if cyclefactor is None else cyclefactor _invocation_parameters['minpsffraction'] = self.__minpsffraction( _invocation_parameters ) if minpsffraction is None else minpsffraction _invocation_parameters['maxpsffraction'] = self.__maxpsffraction( _invocation_parameters ) if maxpsffraction is None else maxpsffraction _invocation_parameters['interactive'] = self.__interactive( _invocation_parameters ) if interactive is None else interactive _invocation_parameters['mask'] = self.__mask( _invocation_parameters ) if mask is None else mask _invocation_parameters['pbmask'] = self.__pbmask( _invocation_parameters ) if pbmask is None else pbmask _invocation_parameters['sidelobethreshold'] = self.__sidelobethreshold( _invocation_parameters ) if sidelobethreshold is None else sidelobethreshold _invocation_parameters['noisethreshold'] = self.__noisethreshold( _invocation_parameters ) if noisethreshold is None else noisethreshold _invocation_parameters['lownoisethreshold'] = self.__lownoisethreshold( 
_invocation_parameters ) if lownoisethreshold is None else lownoisethreshold _invocation_parameters['negativethreshold'] = self.__negativethreshold( _invocation_parameters ) if negativethreshold is None else negativethreshold _invocation_parameters['smoothfactor'] = self.__smoothfactor( _invocation_parameters ) if smoothfactor is None else smoothfactor _invocation_parameters['minbeamfrac'] = self.__minbeamfrac( _invocation_parameters ) if minbeamfrac is None else minbeamfrac _invocation_parameters['cutthreshold'] = self.__cutthreshold( _invocation_parameters ) if cutthreshold is None else cutthreshold _invocation_parameters['growiterations'] = self.__growiterations( _invocation_parameters ) if growiterations is None else growiterations _invocation_parameters['dogrowprune'] = self.__dogrowprune( _invocation_parameters ) if dogrowprune is None else dogrowprune _invocation_parameters['minpercentchange'] = self.__minpercentchange( _invocation_parameters ) if minpercentchange is None else minpercentchange _invocation_parameters['verbose'] = self.__verbose( _invocation_parameters ) if verbose is None else verbose _invocation_parameters['psfcutoff'] = self.__psfcutoff( _invocation_parameters ) if psfcutoff is None else psfcutoff else: # invoke with inp/go semantics _invocation_parameters['vis'] = self.__vis( self.__globals_( ) ) _invocation_parameters['imageprefix'] = self.__imageprefix( self.__globals_( ) ) _invocation_parameters['imagesuffix'] = self.__imagesuffix( self.__globals_( ) ) _invocation_parameters['ncpu'] = self.__ncpu( self.__globals_( ) ) _invocation_parameters['twidth'] = self.__twidth( self.__globals_( ) ) _invocation_parameters['doreg'] = self.__doreg( self.__globals_( ) ) _invocation_parameters['usephacenter'] = self.__usephacenter( self.__globals_( ) ) _invocation_parameters['reftime'] = self.__reftime( self.__globals_( ) ) _invocation_parameters['toTb'] = self.__toTb( self.__globals_( ) ) _invocation_parameters['sclfactor'] = self.__sclfactor( self.__globals_( ) ) _invocation_parameters['subregion'] = self.__subregion( self.__globals_( ) ) _invocation_parameters['docompress'] = self.__docompress( self.__globals_( ) ) _invocation_parameters['overwrite'] = self.__overwrite( self.__globals_( ) ) _invocation_parameters['selectdata'] = self.__selectdata( self.__globals_( ) ) _invocation_parameters['field'] = self.__field( self.__globals_( ) ) _invocation_parameters['spw'] = self.__spw( self.__globals_( ) ) _invocation_parameters['timerange'] = self.__timerange( self.__globals_( ) ) _invocation_parameters['uvrange'] = self.__uvrange( self.__globals_( ) ) _invocation_parameters['antenna'] = self.__antenna( self.__globals_( ) ) _invocation_parameters['scan'] = self.__scan( self.__globals_( ) ) _invocation_parameters['observation'] = self.__observation( self.__globals_( ) ) _invocation_parameters['intent'] = self.__intent( self.__globals_( ) ) _invocation_parameters['datacolumn'] = self.__datacolumn( self.__globals_( ) ) _invocation_parameters['imagename'] = self.__imagename( self.__globals_( ) ) _invocation_parameters['imsize'] = self.__imsize( self.__globals_( ) ) _invocation_parameters['cell'] = self.__cell( self.__globals_( ) ) _invocation_parameters['phasecenter'] = self.__phasecenter( self.__globals_( ) ) _invocation_parameters['stokes'] = self.__stokes( self.__globals_( ) ) _invocation_parameters['projection'] = self.__projection( self.__globals_( ) ) _invocation_parameters['startmodel'] = self.__startmodel( self.__globals_( ) ) _invocation_parameters['specmode'] = 
self.__specmode( self.__globals_( ) ) _invocation_parameters['reffreq'] = self.__reffreq( self.__globals_( ) ) _invocation_parameters['nchan'] = self.__nchan( self.__globals_( ) ) _invocation_parameters['start'] = self.__start( self.__globals_( ) ) _invocation_parameters['width'] = self.__width( self.__globals_( ) ) _invocation_parameters['outframe'] = self.__outframe( self.__globals_( ) ) _invocation_parameters['veltype'] = self.__veltype( self.__globals_( ) ) _invocation_parameters['restfreq'] = self.__restfreq( self.__globals_( ) ) _invocation_parameters['interpolation'] = self.__interpolation( self.__globals_( ) ) _invocation_parameters['perchanweightdensity'] = self.__perchanweightdensity( self.__globals_( ) ) _invocation_parameters['gridder'] = self.__gridder( self.__globals_( ) ) _invocation_parameters['facets'] = self.__facets( self.__globals_( ) ) _invocation_parameters['psfphasecenter'] = self.__psfphasecenter( self.__globals_( ) ) _invocation_parameters['wprojplanes'] = self.__wprojplanes( self.__globals_( ) ) _invocation_parameters['vptable'] = self.__vptable( self.__globals_( ) ) _invocation_parameters['mosweight'] = self.__mosweight( self.__globals_( ) ) _invocation_parameters['aterm'] = self.__aterm( self.__globals_( ) ) _invocation_parameters['psterm'] = self.__psterm( self.__globals_( ) ) _invocation_parameters['wbawp'] = self.__wbawp( self.__globals_( ) ) _invocation_parameters['conjbeams'] = self.__conjbeams( self.__globals_( ) ) _invocation_parameters['cfcache'] = self.__cfcache( self.__globals_( ) ) _invocation_parameters['usepointing'] = self.__usepointing( self.__globals_( ) ) _invocation_parameters['computepastep'] = self.__computepastep( self.__globals_( ) ) _invocation_parameters['rotatepastep'] = self.__rotatepastep( self.__globals_( ) ) _invocation_parameters['pointingoffsetsigdev'] = self.__pointingoffsetsigdev( self.__globals_( ) ) _invocation_parameters['pblimit'] = self.__pblimit( self.__globals_( ) ) _invocation_parameters['normtype'] = self.__normtype( self.__globals_( ) ) _invocation_parameters['deconvolver'] = self.__deconvolver( self.__globals_( ) ) _invocation_parameters['scales'] = self.__scales( self.__globals_( ) ) _invocation_parameters['nterms'] = self.__nterms( self.__globals_( ) ) _invocation_parameters['smallscalebias'] = self.__smallscalebias( self.__globals_( ) ) _invocation_parameters['restoration'] = self.__restoration( self.__globals_( ) ) _invocation_parameters['restoringbeam'] = self.__restoringbeam( self.__globals_( ) ) _invocation_parameters['pbcor'] = self.__pbcor( self.__globals_( ) ) _invocation_parameters['outlierfile'] = self.__outlierfile( self.__globals_( ) ) _invocation_parameters['weighting'] = self.__weighting( self.__globals_( ) ) _invocation_parameters['robust'] = self.__robust( self.__globals_( ) ) _invocation_parameters['noise'] = self.__noise( self.__globals_( ) ) _invocation_parameters['npixels'] = self.__npixels( self.__globals_( ) ) _invocation_parameters['uvtaper'] = self.__uvtaper( self.__globals_( ) ) _invocation_parameters['niter'] = self.__niter( self.__globals_( ) ) _invocation_parameters['gain'] = self.__gain( self.__globals_( ) ) _invocation_parameters['threshold'] = self.__threshold( self.__globals_( ) ) _invocation_parameters['nsigma'] = self.__nsigma( self.__globals_( ) ) _invocation_parameters['cycleniter'] = self.__cycleniter( self.__globals_( ) ) _invocation_parameters['cyclefactor'] = self.__cyclefactor( self.__globals_( ) ) _invocation_parameters['minpsffraction'] = self.__minpsffraction( 
self.__globals_( ) ) _invocation_parameters['maxpsffraction'] = self.__maxpsffraction( self.__globals_( ) ) _invocation_parameters['interactive'] = self.__interactive( self.__globals_( ) ) _invocation_parameters['usemask'] = self.__usemask( self.__globals_( ) ) _invocation_parameters['mask'] = self.__mask( self.__globals_( ) ) _invocation_parameters['pbmask'] = self.__pbmask( self.__globals_( ) ) _invocation_parameters['sidelobethreshold'] = self.__sidelobethreshold( self.__globals_( ) ) _invocation_parameters['noisethreshold'] = self.__noisethreshold( self.__globals_( ) ) _invocation_parameters['lownoisethreshold'] = self.__lownoisethreshold( self.__globals_( ) ) _invocation_parameters['negativethreshold'] = self.__negativethreshold( self.__globals_( ) ) _invocation_parameters['smoothfactor'] = self.__smoothfactor( self.__globals_( ) ) _invocation_parameters['minbeamfrac'] = self.__minbeamfrac( self.__globals_( ) ) _invocation_parameters['cutthreshold'] = self.__cutthreshold( self.__globals_( ) ) _invocation_parameters['growiterations'] = self.__growiterations( self.__globals_( ) ) _invocation_parameters['dogrowprune'] = self.__dogrowprune( self.__globals_( ) ) _invocation_parameters['minpercentchange'] = self.__minpercentchange( self.__globals_( ) ) _invocation_parameters['verbose'] = self.__verbose( self.__globals_( ) ) _invocation_parameters['fastnoise'] = self.__fastnoise( self.__globals_( ) ) _invocation_parameters['restart'] = self.__restart( self.__globals_( ) ) _invocation_parameters['savemodel'] = self.__savemodel( self.__globals_( ) ) _invocation_parameters['calcres'] = self.__calcres( self.__globals_( ) ) _invocation_parameters['calcpsf'] = self.__calcpsf( self.__globals_( ) ) _invocation_parameters['psfcutoff'] = self.__psfcutoff( self.__globals_( ) ) _invocation_parameters['parallel'] = self.__parallel( self.__globals_( ) ) try: with open(_prefile,'w') as _f: for _i in _invocation_parameters: _f.write("%-20s = %s\n" % (_i,noobj(repr(_invocation_parameters[_i])))) _f.write("#ptclean6( ") count = 0 for _i in _invocation_parameters: _f.write("%s=%s" % (_i,noobj(repr(_invocation_parameters[_i])))) count += 1 if count < len(_invocation_parameters): _f.write(",") _f.write(" )\n") except: pass try: _return_result_ = _ptclean6_t( 
                # positional order must match the _ptclean6_t signature exactly, which is
                # why the keys are listed explicitly instead of using _invocation_parameters.values()
                # (the OrderedDict is filled in a different order in the python-style branch)
                *[ _invocation_parameters[k] for k in
                   ( 'vis', 'imageprefix', 'imagesuffix', 'ncpu', 'twidth', 'doreg', 'usephacenter', 'reftime', 'toTb', 'sclfactor',
                     'subregion', 'docompress', 'overwrite', 'selectdata', 'field', 'spw', 'timerange', 'uvrange', 'antenna', 'scan',
                     'observation', 'intent', 'datacolumn', 'imagename', 'imsize', 'cell', 'phasecenter', 'stokes', 'projection', 'startmodel',
                     'specmode', 'reffreq', 'nchan', 'start', 'width', 'outframe', 'veltype', 'restfreq', 'interpolation', 'perchanweightdensity',
                     'gridder', 'facets', 'psfphasecenter', 'wprojplanes', 'vptable', 'mosweight', 'aterm', 'psterm', 'wbawp', 'conjbeams',
                     'cfcache', 'usepointing', 'computepastep', 'rotatepastep', 'pointingoffsetsigdev', 'pblimit', 'normtype', 'deconvolver', 'scales', 'nterms',
                     'smallscalebias', 'restoration', 'restoringbeam', 'pbcor', 'outlierfile', 'weighting', 'robust', 'noise', 'npixels', 'uvtaper',
                     'niter', 'gain', 'threshold', 'nsigma', 'cycleniter', 'cyclefactor', 'minpsffraction', 'maxpsffraction', 'interactive', 'usemask',
                     'mask', 'pbmask', 'sidelobethreshold', 'noisethreshold', 'lownoisethreshold', 'negativethreshold', 'smoothfactor', 'minbeamfrac', 'cutthreshold', 'growiterations',
                     'dogrowprune', 'minpercentchange', 'verbose', 'fastnoise', 'restart', 'savemodel', 'calcres', 'calcpsf', 'psfcutoff', 'parallel' ) ] )
        except Exception as e:
            from traceback import format_exc
            from casatasks import casalog
            casalog.origin('ptclean6')
            casalog.post("Exception Reported: Error in ptclean6: %s" % str(e),'SEVERE')
            casalog.post(format_exc( ))
            _return_result_ = False

        try:
            os.rename(_prefile,_postfile)
        except:
            pass

        return _return_result_

ptclean6 = _ptclean6( )
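# --------------------------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the generated bindings): every
# keyword below comes from the __call__ signature defined above, but the file
# names and parameter values are hypothetical.
#
#     ptclean6( vis='sun_spw0.ms',        # input visibility file (hypothetical name)
#               imageprefix='images/',    # output path prefix for the image names
#               ncpu=4,                   # CPU cores used for the time-step loop
#               twidth=1,                 # time pixels averaged per output image
#               imagename='sun',          # pre-name of the output images
#               imsize=[512,512],
#               cell=['2arcsec'],
#               niter=1000 )
#
# Calling ptclean6() with no arguments instead falls back to the inp/go-style
# global defaults, mirroring the two invocation branches implemented in __call__.
# --------------------------------------------------------------------------------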
##################### generated by xml-casa (v2) from ptclean6.xml ##################
##################### 6a89d05724a14fedd7b8ceb75d841936 ##############################
from __future__ import absolute_import
from casashell.private.stack_manip import find_local as __sf__
from casashell.private.stack_manip import find_frame as _find_frame
from casatools.typecheck import validator as _pc
from casatools.coercetype import coerce as _coerce
from suncasatasks import ptclean6 as _ptclean6_t
from collections import OrderedDict
import numpy
import sys
import os
import shutil

def static_var(varname, value):
    def decorate(func):
        setattr(func, varname, value)
        return func
    return decorate

class _ptclean6:
    """
    ptclean6 ---- Parallelized tclean in consecutive time steps

    Parallelized clean in consecutive time steps. Packed over CASA 6 tclean.

    --------- parameter descriptions ---------------------------------------------

    vis            Name(s) of input visibility file(s)
                   default: none
                   example: vis='ngc5921.ms'
                            vis=['ngc5921a.ms','ngc5921b.ms']; multiple MSes
    imageprefix    Prefix of output image names (usually useful in defining the output path)
    imagesuffix    Suffix of output image names (usually useful in specifying the image type,
                   version, etc.)
    ncpu           Number of cpu cores to use
    twidth         Number of time pixels to average
    doreg          True if use vla_prep to register the image
    usephacenter   True if use the phase center information from the measurement set (e.g., VLA);
                   False to assume the phase center is at the solar disk center (EOVSA)
    reftime        Reference time of the J2000 coordinates associated with the ephemeris target,
                   e.g., "2012/03/03/12:00". This is used by helioimage2fits.py to find the solar
                   x, y offset in order to register the image. If not set, use the actual
                   timerange of the image (default)
    toTb           True if convert to brightness temperature
    sclfactor      scale the brightness temperature up by its value
    subregion      The name of a CASA image or region file, or a CASA region string. Only
                   locations within the region will be output to the fits file. If the specified
                   region falls completely outside of the image, ptclean6 will throw an error.
                   Manual mask options/examples:
                   subregion='box[[224pix,224pix],[288pix,288pix]]' : a CASA region string.
    docompress     True if compress the output fits files
    overwrite      True if overwrite the image
    selectdata     Enable data selection parameters.
    field          Select fields to image or mosaic. Use field id(s) or name(s).
                   ['go listobs' to obtain the list id's or names]
                   default: ''= all fields
                   If the field string is a non-negative integer, it is assumed to be a field
                   index; otherwise, it is assumed to be a field name
                   field='0~2'; field ids 0,1,2
                   field='0,4,5~7'; field ids 0,4,5,6,7
                   field='3C286,3C295'; fields named 3C286 and 3C295
                   field='3,4C*'; field id 3, all names starting with 4C
                   For multiple MS input, a list of field strings can be used:
                   field=['0~2','0~4']; field ids 0-2 for the first MS and 0-4 for the second
                   field='0~2'; field ids 0-2 for all input MSes
    spw            Select spectral window/channels
                   NOTE: channels de-selected here will contain all zeros if selected by the
                   parameter mode subparameters.
                   default: ''=all spectral windows and channels
                   spw='0~2,4'; spectral windows 0,1,2,4 (all channels)
                   spw='0:5~61'; spw 0, channels 5 to 61
                   spw='<2'; spectral windows less than 2 (i.e. 0,1)
                   spw='0,10,3:3~45'; spw 0,10 all channels, spw 3, channels 3 to 45.
                   spw='0~2:2~6'; spw 0,1,2 with channels 2 through 6 in each.
                  For multiple MS input, a list of spw strings can be used:
                  spw=['0','0~3']; spw ids 0 for the first MS and 0-3 for the second
                  spw='0~3'; spw ids 0-3 for all input MSes
                  spw='3:10~20;50~60' for multiple channel ranges within spw id 3
                  spw='3:10~20;50~60,4:0~30' for different channel ranges for spw ids 3 and 4
                  spw='0:0~10,1:20~30,2:1;2;3'; spw 0, channels 0-10, spw 1, channels 20-30,
                  and spw 2, channels 1,2 and 3
                  spw='1~4;6:15~48' for channels 15 through 48 for spw ids 1,2,3,4 and 6

    timerange     Range of time to select from data
                  default: '' (all);
                  example: timerange = 'YYYY/MM/DD/hh:mm:ss~YYYY/MM/DD/hh:mm:ss'
                  Note: if YYYY/MM/DD is missing, the date defaults to the first day in the
                  data set
                  timerange='09:14:0~09:54:0' picks 40 min on the first day
                  timerange='25:00:00~27:30:00' picks 1 hr to 3 hr 30 min on the NEXT day
                  timerange='09:44:00' picks data within one integration of time
                  timerange='> 10:24:00' data after this time
                  For multiple MS input, a list of timerange strings can be used:
                  timerange=['09:14:0~09:54:0','> 10:24:00']
                  timerange='09:14:0~09:54:0'; apply the same timerange for all input MSes

    uvrange       Select data within uvrange (default unit is meters)
                  default: '' (all);
                  example: uvrange='0~1000klambda'; uvrange from 0-1000 kilo-lambda
                  uvrange='> 4klambda'; uvranges greater than 4 kilo-lambda
                  For multiple MS input, a list of uvrange strings can be used:
                  uvrange=['0~1000klambda','100~1000klambda']
                  uvrange='0~1000klambda'; apply 0-1000 kilo-lambda for all input MSes

    antenna       Select data based on antenna/baseline
                  default: '' (all)
                  If the antenna string is a non-negative integer, it is assumed to be an
                  antenna index; otherwise, it is considered an antenna name.
                  antenna='5\&6'; baseline between antenna index 5 and index 6.
                  antenna='VA05\&VA06'; baseline between VLA antenna 5 and 6.
                  antenna='5\&6;7\&8'; baselines 5-6 and 7-8
                  antenna='5'; all baselines with antenna index 5
                  antenna='05'; all baselines with antenna number 05 (VLA old name)
                  antenna='5,6,9'; all baselines with antennas 5,6,9 index number
                  For multiple MS input, a list of antenna strings can be used:
                  antenna=['5','5\&6'];
                  antenna='5'; antenna index 5 for all input MSes
                  antenna='!DV14'; use all antennas except DV14

    scan          Scan number range
                  default: '' (all)
                  example: scan='1~5'
                  For multiple MS input, a list of scan strings can be used:
                  scan=['0~100','10~200']
                  scan='0~100'; scan ids 0-100 for all input MSes

    observation   Observation ID range
                  default: '' (all)
                  example: observation='1~5'

    intent        Scan Intent(s)
                  default: '' (all)
                  example: intent='TARGET_SOURCE'
                  example: intent='TARGET_SOURCE1,TARGET_SOURCE2'
                  example: intent='TARGET_POINTING*'

    datacolumn    Data column to image (data or observed, corrected)
                  default: 'corrected'
                  ( If 'corrected' does not exist, it will use 'data' instead )

    imagename     Pre-name of output images
                  example: imagename='try'
                  Output images will be (a subset of) :
                  try.psf        - Point spread function
                  try.residual   - Residual image
                  try.image      - Restored image
                  try.model      - Model image (contains only flux components)
                  try.sumwt      - Single pixel image containing sum-of-weights.
                                   (for natural weighting, sensitivity=1/sqrt(sumwt))
                  try.pb         - Primary beam model (values depend on the gridder used)
                  Widefield projection algorithms (gridder=mosaic,awproject) will compute
                  the following images too.
                  try.weight     - FT of gridded weights or the un-normalized sum of
                                   PB-square (for all pointings). Here, PB = sqrt(weight)
                                   normalized to a maximum of 1.0
                  For multi-term wideband imaging, all relevant images above will have
                  additional .tt0, .tt1, etc. suffixes to indicate Taylor terms, plus the
                  following extra output images.
                  try.alpha       - spectral index
                  try.alpha.error - estimate of error on spectral index
                  try.beta        - spectral curvature (if nterms \> 2)
                  Tip : Include a directory name in 'imagename' for all output images to be
                  sent there instead of the current working directory : imagename='mydir/try'
                  Tip : Restarting an imaging run without changing 'imagename' implies
                  continuation from the existing model image on disk.
                  - If 'startmodel' was initially specified it needs to be set to "" for the
                    restart run (or tclean will exit with an error message).
                  - By default, the residual image and psf will be recomputed but if no
                    changes were made to relevant parameters between the runs, set
                    calcres=False, calcpsf=False to resume directly from the minor cycle
                    without the (unnecessary) first major cycle.
                  To automatically change 'imagename' with a numerical increment, set
                  restart=False (see tclean docs for 'restart').
                  Note : All imaging runs will by default produce restored images. For a
                  niter=0 run, this will be redundant and can optionally be turned off via
                  the 'restoration=T/F' parameter.

    imsize        Number of pixels
                  example: imsize = [350,250]
                  imsize = 500 is equivalent to [500,500]
                  To take proper advantage of internal optimized FFT routines, the number
                  of pixels must be even and factorizable by 2,3,5,7 only.

    cell          Cell size
                  example: cell=['0.5arcsec','0.5arcsec'] or cell=['1arcmin','1arcmin']
                  cell = '1arcsec' is equivalent to ['1arcsec','1arcsec']

    phasecenter   Phase center of the image (string or field id); if the phasecenter is
                  the name of a known major solar system object ('MERCURY', 'VENUS',
                  'MARS', 'JUPITER', 'SATURN', 'URANUS', 'NEPTUNE', 'PLUTO', 'SUN', 'MOON')
                  or is an ephemerides table, then that source is tracked and the
                  background sources get smeared. There is a special case, when
                  phasecenter='TRACKFIELD', which will use the ephemerides or polynomial
                  phasecenter in the FIELD table of the MS's as the source center to track.
                  example: phasecenter=6
                           phasecenter='J2000 19h30m00 -40d00m00'
                           phasecenter='J2000 292.5deg -40.0deg'
                           phasecenter='J2000 5.105rad -0.698rad'
                           phasecenter='ICRS 13:05:27.2780 -049.28.04.458'
                           phasecenter='myComet_ephem.tab'
                           phasecenter='MOON'
                           phasecenter='TRACKFIELD'

    stokes        Stokes Planes to make
                  default='I'; example: stokes='IQUV';
                  Options: 'I','Q','U','V','IV','QU','IQ','UV','IQUV','RR','LL','XX','YY',
                  'RRLL','XXYY','pseudoI'
                  Note : Due to current internal code constraints, if any correlation pair
                  is flagged, by default, no data for that row in the MS will be used. So,
                  in an MS with XX,YY, if only YY is flagged, neither a Stokes I image nor
                  an XX image can be made from those data points. In such a situation,
                  please split out only the unflagged correlation into a separate MS.
                  Note : The 'pseudoI' option is a partial solution, allowing Stokes I
                  imaging when either of the parallel-hand correlations are unflagged.
                  The remaining constraints shall be removed (where logical) in a future
                  release.
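                  As an illustration only (file names and values below are hypothetical,
                  not taken from any dataset), a minimal ptclean6 call combining the
                  data-selection and image-definition parameters described above might
                  look like :
                      from suncasatasks import ptclean6
                      ptclean6(vis='sun_flare.ms',
                               imageprefix='images/', imagesuffix='.tb',
                               ncpu=4, twidth=12,          # parallelization, time binning
                               doreg=True, toTb=True,      # register and convert to Tb
                               timerange='18:00:00~18:10:00', spw='0~3',
                               imagename='try', imsize=[512,512],
                               cell=['2arcsec'], stokes='I', niter=500)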
    projection    Coordinate projection
                  Examples : SIN, NCP
                  A list of supported (but untested) projections can be found here :
                  http://casa.nrao.edu/active/docs/doxygen/html/classcasa_1_1Projection.html#a3d5f9ec787e4eabdce57ab5edaf7c0cd

    startmodel    Name of starting model image
                  The contents of the supplied starting model image will be copied to the
                  imagename.model before the run begins.
                  example : startmodel = 'singledish.im'
                  For deconvolver='mtmfs', one image per Taylor term must be provided.
                  example : startmodel = ['try.model.tt0', 'try.model.tt1']
                            startmodel = ['try.model.tt0'] will use a starting model only
                            for the zeroth order term.
                            startmodel = ['','try.model.tt1'] will use a starting model
                            only for the first order term.
                  This starting model can be of a different image shape and size from what
                  is currently being imaged. If so, an image regrid is first triggered to
                  resample the input image onto the target coordinate system. A common
                  usage is to set this parameter equal to a single dish image.
                  Negative components in the model image will be included as is.
                  [ Note : If an error occurs during image resampling/regridding, please
                  try using task imregrid to resample the starting model image onto a CASA
                  image with the target shape and coordinate system before supplying it
                  via startmodel ]

    specmode      Spectral definition mode (mfs, cube, cubedata, cubesource)
                  mode='mfs' : Continuum imaging with only one output image channel.
                  (mode='cont' can also be used here)
                  mode='cube' : Spectral line imaging with one or more channels.
                  Parameters start, width, and nchan define the spectral coordinate system
                  and can be specified either in terms of channel numbers, frequency or
                  velocity in whatever spectral frame is specified in 'outframe'. All
                  internal and output images are made with outframe as the base spectral
                  frame. However, imaging code internally uses the fixed spectral frame,
                  LSRK, for automatic internal software Doppler tracking so that a
                  spectral line observed over an extended time range will line up
                  appropriately. Therefore the output images have an additional spectral
                  frame conversion layer in LSRK on top of the base frame.
                  (Note : Even if the input parameters are specified in a frame other than
                  LSRK, the viewer still displays the spectral axis in LSRK by default
                  because of the conversion frame layer mentioned above. The viewer can be
                  used to relabel the spectral axis in any desired frame - via the
                  spectral reference option under axis label properties in the data
                  display options window.)
                  mode='cubedata' : Spectral line imaging with one or more channels.
                  There is no internal software Doppler tracking, so a spectral line
                  observed over an extended time range may be smeared out in frequency.
                  There is strictly no valid spectral frame with which to label the output
                  images, but they will list the frame defined in the MS.
                  mode='cubesource' : Spectral line imaging while tracking a moving source
                  (near field or solar system objects). The velocity of the source is
                  accounted for and the frequency reported is in the source frame. As
                  there is no SOURCE frame defined, the frame reported will be REST (note
                  it may not be in the rest frame of the emission region, which may be
                  moving w.r.t. the systemic velocity frame).

    reffreq       Reference frequency of the output image coordinate system
                  Example : reffreq='1.5GHz' as a string with units.
                  By default, it is calculated as the middle of the selected frequency
                  range. For deconvolver='mtmfs' the Taylor expansion is also done about
                  this specified reference frequency.
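                  As an illustrative sketch only (file names and values are hypothetical),
                  a wideband continuum setup that fixes the Taylor-expansion reference
                  frequency would combine these parameters as :
                      ptclean6(vis='sun_flare.ms', imagename='cont',
                               specmode='mfs', reffreq='1.5GHz',  # expand about 1.5 GHz
                               deconvolver='mtmfs', nterms=2,     # see 'deconvolver' below
                               niter=1000)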
    nchan         Number of channels in the output image
                  For the default (=-1), the number of channels will be automatically
                  determined based on data selected by 'spw' with 'start' and 'width'. It
                  is often easiest to leave nchan at the default value.
                  example: nchan=100

    start         First channel (e.g. start=3, start='1.1GHz', start='15343km/s') of
                  output cube images, specified by data channel number (integer), velocity
                  (string with a unit), or frequency (string with a unit).
                  Default: ''; the first channel is automatically determined based on the
                  'spw' channel selection and 'width'.
                  When the channel number is used along with the channel selection in
                  'spw' (e.g. spw='0:6~100'), the 'start' channel number is RELATIVE
                  (zero-based) to the selected channels in 'spw'. So for the above
                  example, start=1 means that the first image channel is the second
                  selected data channel, which is channel 7.
                  For specmode='cube', when velocity or frequency is used it is
                  interpreted with the frame defined in outframe. [The parameters of the
                  desired output cube can be estimated by using the 'transform'
                  functionality of 'plotms']
                  examples: start='5.0km/s'; 1st channel, 5.0km/s in outframe
                            start='22.3GHz'; 1st channel, 22.3GHz in outframe

    width         Channel width (e.g. width=2, width='0.1MHz', width='10km/s') of output
                  cube images, specified by data channel number (integer), velocity
                  (string with a unit), or frequency (string with a unit).
                  Default: ''; data channel width
                  The sign of width defines the direction in which the channels are
                  incremented. A width specified in velocity or frequency with '-' in
                  front gives image channels in decreasing velocity or frequency,
                  respectively.
                  For specmode='cube', when velocity or frequency is used it is
                  interpreted with the reference frame defined in outframe.
                  examples: width='2.0km/s'; results in channels with increasing velocity
                            width='-2.0km/s'; results in channels with decreasing velocity
                            width='40kHz'; results in channels with increasing frequency
                            width=-2; results in channels averaged over 2 data channels,
                            incremented from high to low channel numbers

    outframe      Spectral reference frame in which to interpret 'start' and 'width'
                  Options: '','LSRK','LSRD','BARY','GEO','TOPO','GALACTO','LGROUP','CMB'
                  example: outframe='bary' for Barycentric frame
                  REST -- Rest frequency
                  LSRD -- Local Standard of Rest (J2000) -- as the dynamical definition
                          (IAU, [9,12,7] km/s in galactic coordinates)
                  LSRK -- LSR as a kinematical (radio) definition -- 20.0 km/s in
                          direction ra,dec = [270,+30] deg (B1900.0)
                  BARY -- Barycentric (J2000)
                  GEO  -- Geocentric
                  TOPO -- Topocentric
                  GALACTO -- Galactocentric (with rotation of 220 km/s in direction
                          l,b = [90,0] deg)
                  LGROUP -- Local group velocity -- 308 km/s towards l,b = [105,-7] deg
                          (F. Ghigo)
                  CMB -- CMB velocity -- 369.5 km/s towards l,b = [264.4, 48.4] deg
                          (F. Ghigo)
                  DEFAULT = LSRK

    veltype       Velocity type (radio, z, ratio, beta, gamma, optical)
                  For start and/or width specified in velocity, specifies the velocity
                  definition.
                  Options: 'radio','z','ratio','beta','gamma','optical'
                  NOTE: the viewer always defaults to displaying the 'radio' frame, but
                  that can be changed in the position tracking pull down.
                  The different types (with F = f/f0, the frequency ratio) are:
                      Z       = (-1 + 1/F)
                      RATIO   = (F) *
                      RADIO   = (1 - F)
                      OPTICAL == Z
                      BETA    = ((1 - F^2)/(1 + F^2))
                      GAMMA   = ((1 + F^2)/2F) *
                      RELATIVISTIC == BETA (== v/c)
                      DEFAULT == RADIO
                  Note that the ones with an '*' have no real interpretation (although the
                  calculation will proceed) if given as a velocity.
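                  As an illustrative sketch only (values hypothetical), a cube setup using
                  the spectral-axis parameters above would look like :
                      ptclean6(vis='sun_flare.ms', imagename='cube',
                               specmode='cube', nchan=30,
                               start='5.0km/s', width='2.0km/s',  # interpreted in outframe
                               outframe='LSRK', veltype='radio',
                               niter=500)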
    restfreq      List of rest frequencies or a rest frequency in a string.
                  Specify the rest frequency to use for the output image.
                  *Currently it uses the first rest frequency in the list for translation
                  of velocities. The list will be stored in the output images.
                  Default: []; look for the rest frequency stored in the MS; if not
                  available, use the center frequency of the selected channels.
                  examples: restfreq=['1.42GHz']
                            restfreq='1.42GHz'

    interpolation Spectral interpolation (nearest, linear, cubic)
                  Interpolation rules to use when binning data channels onto image
                  channels and evaluating visibility values at the centers of image
                  channels.
                  Note : 'linear' and 'cubic' interpolation requires data points on both
                  sides of each image frequency. Errors are therefore possible at edge
                  channels, or near flagged data channels. When the image channel width is
                  much larger than the data channel width there is little to be gained
                  from linear or cubic interpolation, so it is not worth the extra
                  computation involved.

    perchanweightdensity
                  When calculating weight density for Briggs style weighting in a cube,
                  this parameter determines whether to calculate the weight density for
                  each channel independently (the default, True) or a common weight
                  density for all of the selected data. This parameter has no meaning for
                  continuum (specmode='mfs') imaging or for natural and radial weighting
                  schemes. For cube imaging perchanweightdensity=True is a recommended
                  option that provides more uniform sensitivity per channel for cubes, but
                  with generally larger psfs than the perchanweightdensity=False (prior
                  behavior) option.
                  When using Briggs style weighting with perchanweightdensity=True, the
                  imaging weight density calculations use only the weights of data that
                  contribute specifically to that channel. On the other hand, when
                  perchanweightdensity=False, the imaging weight density calculations sum
                  all of the weights from all of the data channels selected whose (u,v)
                  falls in a given uv cell on the weight density grid. Since the
                  aggregated weights, in any given uv cell, will change depending on the
                  number of channels included when imaging, the psf calculated for a given
                  frequency channel will also necessarily change, resulting in variability
                  in the psf for a given frequency channel when
                  perchanweightdensity=False. In general, perchanweightdensity=False
                  results in smaller psfs for the same value of robustness compared to
                  perchanweightdensity=True, but the rms noise as a function of channel
                  varies and increases toward the edge channels;
                  perchanweightdensity=True provides more uniform sensitivity per channel
                  for cubes. This may make it harder to find estimates of continuum when
                  perchanweightdensity=False. If you intend to image a large cube in many
                  smaller subcubes and subsequently concatenate, it is advisable to use
                  perchanweightdensity=True to avoid surprising variations in sensitivity
                  and psfs across the concatenated cube.

    gridder       Gridding options (standard, wproject, widefield, mosaic, awproject)
                  The following options choose different gridding convolution functions
                  for the process of convolutional resampling of the measured
                  visibilities onto a regular uv-grid prior to an inverse FFT. Model
                  prediction (degridding) also uses these same functions. Several
                  wide-field effects can be accounted for via careful choices of
                  convolution functions. Gridding (degridding) runtime will rise in
                  proportion to the support size of these convolution functions (in
                  uv-pixels).
                  standard : Prolate Spheroid with 7x7 uv pixel support size
                       [ This mode can also be invoked using 'ft' or 'gridft' ]
                  wproject : W-Projection algorithm to correct for the widefield
                       non-coplanar baseline effect. [Cornwell et al. 2008]
                       wprojplanes is the number of distinct w-values at which to compute
                       and use different gridding convolution functions (see help for
                       wprojplanes). Convolution function support size can range from 5x5
                       to a few 100 x a few 100.
                       [ This mode can also be invoked using 'wprojectft' ]
                  widefield : Facetted imaging with or without W-Projection per facet.
                       A set of facets x facets subregions of the specified image are
                       gridded separately using their respective phase centers (to
                       minimize max W). Deconvolution is done on the joint full size
                       image, using a PSF from the first subregion.
                       wprojplanes=1 : standard prolate spheroid gridder per facet.
                       wprojplanes > 1 : W-Projection gridder per facet.
                       nfacets=1, wprojplanes > 1 : Pure W-Projection and no facetting
                       nfacets=1, wprojplanes=1 : Same as standard,ft,gridft
                       A combination of facetting and W-Projection is relevant only for
                       very large fields of view. (In our current version of tclean, this
                       combination runs only with parallel=False.)
                  mosaic : A-Projection with azimuthally symmetric beams without
                       sidelobes, beam rotation or squint correction. Gridding convolution
                       functions per visibility are computed from FTs of PB models per
                       antenna. This gridder can be run on single fields as well as
                       mosaics.
                       VLA : PB polynomial fit model (Napier and Rots, 1982)
                       EVLA : PB polynomial fit model (Perley, 2015)
                       ALMA : Airy disks for a 10.7m dish (for 12m dishes) and 6.25m dish
                       (for 7m dishes), each with 0.75m blockages (Hunter/Brogan 2011).
                       Joint mosaic imaging supports heterogeneous arrays for ALMA.
                       Typical gridding convolution function support sizes are between 7
                       and 50 depending on the desired accuracy (given by the uv cell size
                       or image field of view).
                       [ This mode can also be invoked using 'mosaicft' or 'ftmosaic' ]
                  awproject : A-Projection with azimuthally asymmetric beams and including
                       beam rotation, squint correction, conjugate frequency beams and
                       W-projection. [Bhatnagar et al., 2008]
                       Gridding convolution functions are computed from aperture
                       illumination models per antenna and optionally combined with
                       W-Projection kernels and a prolate spheroid. This gridder can be
                       run on single fields as well as mosaics.
                       VLA : Uses a ray-traced model (VLA and EVLA) including feed leg and
                       subreflector shadows, off-axis feed location (for beam squint and
                       other polarization effects), and a Gaussian fit for the feed beams
                       (Ref: Brisken 2009)
                       ALMA : Similar ray-traced model as above (but the correctness of
                       its polarization properties remains un-verified).
                       Typical gridding convolution function support sizes are between 7
                       and 50 depending on the desired accuracy (given by the uv cell size
                       or image field of view). When combined with W-Projection they can
                       be significantly larger.
                       [ This mode can also be invoked using 'awprojectft' ]
                  imagemosaic : (untested implementation)
                       Grid and iFT each pointing separately and combine the images as a
                       linear mosaic (weighted by a PB model) in the image domain before a
                       joint minor cycle.
                       VLA/ALMA PB models are the same as for gridder='mosaicft'
                  ------ Notes on PB models :
                  (1) Several different sources of PB models are used in the modes listed
                  above. This is partly for reasons of algorithmic flexibility and partly
                  due to the current lack of a common beam model repository or consensus
                  on what beam models are most appropriate.
                  (2) For ALMA and gridder='mosaic', ray-traced (TICRA) beams are also
                  available via the vpmanager tool. For example, call the following before
                  the tclean run:
                      vp.setpbimage(telescope="ALMA",
                          compleximage='/home/casa/data/trunk/alma/responses/ALMA_0_DV__0_0_360_0_45_90_348.5_373_373_GHz_ticra2007_VP.im',
                          antnames=['DV'+'%02d'%k for k in range(25)])
                      vp.saveastable('mypb.tab')
                  Then, supply vptable='mypb.tab' to tclean.
                  ( Currently this will work only for non-parallel runs )
                  ------ Note on PB masks :
                  In tclean, A-Projection gridders (mosaic and awproject) produce a .pb
                  image and use the 'pblimit' subparameter to decide normalization cutoffs
                  and construct an internal T/F mask in the .pb and .image images.
                  However, this T/F mask cannot directly be used during deconvolution
                  (which needs a 1/0 mask). There are two options for making a pb based
                  deconvolution mask :
                  -- Run tclean with niter=0 to produce the .pb, construct a 1/0 image
                  with the desired threshold (using ia.open('newmask.im');
                  ia.calc('iif("xxx.pb">0.3,1.0,0.0)'); ia.close() for example), and
                  supply it via the 'mask' parameter in a subsequent run (with calcres=F
                  and calcpsf=F to restart directly from the minor cycle).
                  -- Run tclean with usemask='pb' for it to automatically construct a 1/0
                  mask from the internal T/F mask from .pb at a fixed 0.2 threshold.
                  ----- Making PBs for gridders other than mosaic,awproject :
                  After the PSF generation, a PB is constructed using the same models used
                  in gridder='mosaic' but just evaluated in the image domain without
                  consideration of weights.

    facets        Number of facets on a side
                  A set of (facets x facets) subregions of the specified image are gridded
                  separately using their respective phase centers (to minimize max W).
                  Deconvolution is done on the joint full size image, using a PSF from the
                  first subregion/facet. In our current version of tclean, facets>1 may be
                  used only with parallel=False.

    psfphasecenter
                  For mosaics, use a psf centered on this optional direction. You may need
                  to use this if, for example, the mosaic does not have any pointing in
                  the center of the image. Another reason: as the psf is approximate for a
                  mosaic, this may help to deconvolve a non-central bright source well and
                  quickly.
                  example: psfphasecenter=6  # center psf on field 6
                           psfphasecenter='J2000 19h30m00 -40d00m00'
                           psfphasecenter='J2000 292.5deg -40.0deg'
                           psfphasecenter='J2000 5.105rad -0.698rad'
                           psfphasecenter='ICRS 13:05:27.2780 -049.28.04.458'

    wprojplanes   Number of distinct w-values at which to compute and use different
                  gridding convolution functions for W-Projection.
                  An appropriate value of wprojplanes depends on the presence/absence of a
                  bright source far from the phase center, the desired dynamic range of an
                  image in the presence of a bright far-out source, the maximum w-value in
                  the measurements, and the desired trade-off between accuracy and
                  computing cost.
                  As a (rough) guide, VLA L-Band D-config may require a value of 128 for a
                  source 30 arcmin away from the phase center. A-config may require 1024
                  or more. To converge to an appropriate value, try starting with 128 and
                  then increasing it if artifacts persist. W-term artifacts (for the VLA)
                  typically look like arc-shaped smears in a synthesis image or a shift in
                  source position between images made at different times. These artifacts
                  are more pronounced the further the source is from the phase center.
                  There is no harm in simply always choosing a large value (say, 1024) but
                  there will be a significant performance cost to doing so, especially for
                  gridder='awproject' where it is combined with A-Projection.
                  wprojplanes=-1 is an option for gridder='widefield' or 'wproject' in
                  which the number of planes is automatically computed.

    vptable       Name of a table containing voltage pattern (primary beam) models,
                  created with the vpmanager tool.
                  vptable="" : Choose default beams for different telescopes
                      ALMA : Airy disks
                      EVLA : old VLA models.
                  Other primary beam models can be chosen via the vpmanager tool.
                  Step 1 : Set up the vpmanager tool and save its state in a table
                      vp.setpbpoly(telescope='EVLA', coeff=[1.0, -1.529e-3, 8.69e-7, -1.88e-10])
                      vp.saveastable('myvp.tab')
                  Step 2 : Supply the name of that table in tclean.
                      tclean(....., vptable='myvp.tab',....)
                  Please see the documentation for the vpmanager for more details on how
                  to choose different beam models. Work is in progress to update the
                  defaults for EVLA and ALMA.
                  Note : AWProjection currently does not use this mechanism to choose beam
                  models. It instead uses ray-traced beams computed from parameterized
                  aperture illumination functions, which are not available via the
                  vpmanager. So, gridder='awproject' does not allow the user to set this
                  parameter.

    mosweight     When doing Briggs style weighting (including uniform), if True, perform
                  the weight density calculation for each field independently. If False,
                  the weight density is calculated from the average uv distribution of all
                  the fields.

    aterm         Use aperture illumination functions during gridding
                  This parameter turns on the A-term of the AW-Projection gridder.
                  Gridding convolution functions are constructed from aperture
                  illumination function models of each antenna.

    psterm        Include the Prolate Spheroidal (PS) function as the anti-aliasing
                  operator in the gridding convolution functions used for gridding.
                  Setting this parameter to true is necessary when aterm is set to false.
                  It can be set to false when aterm is set to true, though with this
                  setting aliasing effects may appear in the image, particularly near the
                  edges. When set to true, the .pb images will contain the Fourier
                  transform of the PS function. The table below enumerates the functional
                  effects of the psterm, aterm and wprojplanes settings. PB refers to the
                  Primary Beam and FT() refers to the Fourier transform operation.
                  Operation       aterm   psterm   wprojplanes   Contents of the .pb image
                  ----------------------------------------------------------------------
                  AW-Projection   True    True     >1            FT(PS) x PB
                                          False                  PB
                  A-Projection    True    True     1             FT(PS) x PB
                                          False                  PB
                  W-Projection    False   True     >1            FT(PS)
                  Standard        False   True     1             FT(PS)

    wbawp         Use frequency dependent A-terms
                  Scale aperture illumination functions appropriately with frequency when
                  gridding and combining data from multiple channels.

    conjbeams     Use conjugate frequency for wideband A-terms
                  While gridding data from one frequency channel, choose a convolution
                  function from a 'conjugate' frequency such that the resulting baseline
                  primary beam is approximately constant across frequency. For a system in
                  which the primary beam scales with frequency, this step will eliminate
                  instrumental spectral structure from the measured data and leave only
                  the sky spectrum for the minor cycle to model and reconstruct [Bhatnagar
                  et al., ApJ, 2013].
                  As a rough guideline for when this is relevant, a source at the half
                  power point of the PB at the center frequency will see an artificial
                  spectral index of -1.4 due to the frequency dependence of the PB [Sault
                  and Wieringa, 1994].
                  If left uncorrected during gridding, this spectral structure must be
                  modeled in the minor cycle (using the mtmfs algorithm) to avoid dynamic
                  range limits (of a few hundred for a 2:1 bandwidth).
                  This works for specmode='mfs' and its value is ignored for cubes.

    cfcache       Convolution function cache directory name
                  Name of a directory in which to store gridding convolution functions.
                  This cache is filled at the beginning of an imaging run. This step can
                  be time consuming but the cache can be reused across multiple imaging
                  runs that use the same image parameters (cell size, image size, spectral
                  data selections, wprojplanes, wbawp, psterm, aterm). The effect of the
                  wbawp, psterm and aterm settings is frozen into the cfcache. Using an
                  existing cfcache made with a different setting of these parameters will
                  not reflect the current settings.
                  In a parallel execution, the construction of the cfcache is also
                  parallelized and the time to compute scales close to linearly with the
                  number of compute cores used. With the re-computation of Convolution
                  Functions (CF) due to PA rotation turned off (the computepastep
                  parameter), the total number of CFs in the cfcache can be computed as
                  [No. of wprojplanes x No. of selected spectral windows x 4]
                  By default, cfcache = imagename + '.cf'

    usepointing   The usepointing flag informs the gridder that it should utilize the
                  pointing table to use the correct direction in which the antenna is
                  pointing with respect to the pointing phasecenter.

    computepastep Parallactic angle interval after which the AIFs are recomputed (deg)
                  This parameter controls the accuracy of the aperture illumination
                  function used with AProjection for alt-az mount dishes where the AIF
                  rotates on the sky as the synthesis image is built up. Once the PA in
                  the data changes by the given interval, AIFs are re-computed at the new
                  PA.
                  A value of 360.0 deg (the default) implies no re-computation due to PA
                  rotation. AIFs are computed for the PA value of the first valid data
                  received and used for all of the data.

    rotatepastep  Parallactic angle interval after which the nearest AIF is rotated (deg)
                  Instead of recomputing the AIF for every timestep's parallactic angle,
                  the nearest existing AIF is used and rotated after the PA has changed by
                  the rotatepastep value.
                  A value of 360.0 deg (the default) disables rotation of the AIF.
                  For example, computepastep=360.0 and rotatepastep=5.0 will compute the
                  AIFs at only the starting parallactic angle and all other timesteps will
                  use a rotated version of that AIF at the nearest 5.0 degree point.

    pointingoffsetsigdev
                  Corrections for heterogeneous and time-dependent pointing offsets via
                  AWProjection are controlled by this parameter. It is a vector of 2 ints
                  or doubles, each of which is interpreted in units of arcsec. Based on
                  the first threshold, a clustering algorithm is applied to entries from
                  the POINTING subtable of the MS to determine the number of distinct
                  antenna groups for which the pointing offset must be computed
                  separately. The second number controls how much of a pointing change
                  across time can be ignored, beyond which an antenna rebinning is
                  required.
                  Note : The default value of this parameter is [], due to a programmatic
                  constraint. If run with this value, it will internally pick [600,600]
                  and exercise the option of using large tolerances (10 arcmin) on both
                  axes. Please choose a setting explicitly for runs that need to use this
                  parameter.
                  Note : This option is available only for gridder='awproject' and
                  usepointing=True, and has been validated primarily with VLASS
                  on-the-fly mosaic data where POINTING subtables have been modified after
                  the data are recorded.
                  Examples of parameter usage :
                  [100.0,100.0] : Pointing offsets of 100 arcsec or less are considered
                  small enough to be ignored. Using large values for both indicates a
                  homogeneous array.
                  [10.0, 100.0] : Based on entries in the POINTING subtable, antennas are
                  grouped into clusters based on a 10 arcsec bin size. All antennas in a
                  bin are given a pointing offset calculated as the average of the offsets
                  of all antennas in the bin. On the time axis, offset changes up to 100
                  arcsec will be ignored.
                  [10.0,10.0] : Calculate separate pointing offsets for each antenna group
                  (with a 10 arcsec bin size). As a function of time, recalculate the
                  antenna binning if the POINTING table entries change by more than 10
                  arcsec w.r.t. the previously computed binning.
                  [1.0, 1.0] : Tight tolerances will imply a fully heterogeneous situation
                  where each antenna gets its own pointing offset. Also, time-dependent
                  offset changes greater than 1 arcsec will trigger recomputes of the
                  phase gradients. This is the most general situation and is also the most
                  expensive option as it constructs and uses separate phase gradients for
                  all baselines and timesteps.
                  For VLASS 1.1 data with two kinds of pointing offsets, the recommended
                  setting is [ 30.0, 30.0 ]. For VLASS 1.2 data with only the
                  time-dependent pointing offsets, the recommended setting is
                  [ 300.0, 30.0 ] to turn off the antenna grouping but to retain the
                  time-dependent corrections required from one timestep to the next.

    pblimit       PB gain level at which to cut off normalizations
                  Divisions by .pb during normalizations have a cut off at a .pb gain
                  level given by pblimit. Outside this limit, image values are set to
                  zero. Additionally, by default, an internal T/F mask is applied to the
                  .pb, .image and .residual images to mask out (T) all invalid pixels
                  outside the pblimit area.
                  Note : This internal T/F mask cannot be used as a deconvolution mask. To
                  do so, please follow the steps listed above in the Notes for the
                  'gridder' parameter.
                  Note : To prevent the internal T/F mask from appearing in anything other
                  than the .pb and .image.pbcor images, 'pblimit' can be set to a negative
                  number. The absolute value will still be used as a valid 'pblimit'. A
                  tclean restart using existing output images on disk that already have
                  this T/F mask in the .residual and .image, but with pblimit now set to a
                  negative value, will remove this mask after the next major cycle.

    normtype      Normalization type (flatnoise, flatsky, pbsquare)
                  Gridded (and FT'd) images represent the PB-weighted sky image.
                  Qualitatively it can be approximated as two instances of the PB applied
                  to the sky image (one naturally present in the data and one introduced
                  during gridding via the convolution functions).
                  xxx.weight : Weight image approximately equal to sum ( square ( pb ) )
                  xxx.pb : Primary beam calculated as sqrt ( xxx.weight )
                  normtype='flatnoise' : Divide the raw image by sqrt(.weight) so that the
                       input to the minor cycle represents the product of the sky and PB.
                       The noise is 'flat' across the region covered by each PB.
                  normtype='flatsky' : Divide the raw image by .weight so that the input
                       to the minor cycle represents only the sky. The noise is higher in
                       the outer regions of the primary beam where the sensitivity is low.
                  normtype='pbsquare' : No normalization after gridding and FFT.
                       The minor cycle sees the sky times the square of the PB.

    deconvolver   Name of minor cycle algorithm
                  (hogbom, clark, multiscale, mtmfs, mem, clarkstokes)
                  Each of the following algorithms operates on residual images and psfs
                  from the gridder and produces output model and restored images. Minor
                  cycles stop and a major cycle is triggered when cyclethreshold or
                  cycleniter are reached. For all methods, components are picked from the
                  entire extent of the image or (if specified) within a mask.
                  hogbom : An adapted version of Hogbom Clean [Hogbom, 1974]
                       - Find the location of the peak residual
                       - Add this delta function component to the model image
                       - Subtract a scaled and shifted PSF of the same size as the image
                         from regions of the residual image where the two overlap.
                       - Repeat
                  clark : An adapted version of Clark Clean [Clark, 1980]
                       - Find the location of max(I^2+Q^2+U^2+V^2)
                       - Add delta functions to each stokes plane of the model image
                       - Subtract a scaled and shifted PSF within a small patch size from
                         regions of the residual image where the two overlap.
                       - After several iterations trigger a Clark major cycle to subtract
                         components from the visibility domain, but without de-gridding.
                       - Repeat
                       ( Note : 'clark' maps to imagermode='' in the old clean task.
                       'clark_exp' is another implementation that maps to
                       imagermode='mosaic' or 'csclean' in the old clean task but the
                       behavior is not identical. For now, please use deconvolver='hogbom'
                       if you encounter problems. )
                  clarkstokes : Clark Clean operating separately per Stokes plane
                       (Note : 'clarkstokes_exp' is an alternate version. See above.)
                  multiscale : MultiScale Clean [Cornwell, 2008]
                       - Smooth the residual image to multiple scale sizes
                       - Find the location and scale at which the peak occurs
                       - Add this multiscale component to the model image
                       - Subtract a scaled, smoothed, shifted PSF (within a small patch
                         size per scale) from all residual images
                       - Repeat from step 2
                  mtmfs : Multi-term (Multi Scale) Multi-Frequency Synthesis
                       [Rau and Cornwell, 2011]
                       - Smooth each Taylor residual image to multiple scale sizes
                       - Solve a NTxNT system of equations per scale size to compute
                         Taylor coefficients for components at all locations
                       - Compute gradient chi-square and pick the Taylor coefficients and
                         scale size at the location with maximum reduction in chi-square
                       - Add multi-scale components to each Taylor-coefficient model image
                       - Subtract scaled, smoothed, shifted PSF (within a small patch size
                         per scale) from all smoothed Taylor residual images
                       - Repeat from step 2
                  mem : Maximum Entropy Method [Cornwell and Evans, 1985]
                       - Iteratively solve for values at all individual pixels via the MEM
                         method. It minimizes an objective function of chi-square plus
                         entropy (here, a measure of difference between the current model
                         and a flat prior model).
                       (Note : This MEM implementation is not very robust. Improvements
                       will be made in the future.)

    scales        List of scale sizes (in pixels) for multi-scale and mtmfs algorithms.
                  --> scales=[0,6,20]
                  This set of scale sizes should represent the sizes (diameters in units
                  of number of pixels) of dominant features in the image being
                  reconstructed. The smallest scale size is recommended to be 0 (point
                  source), the second the size of the synthesized beam and the third 3-5
                  times the synthesized beam, etc. For example, if the synthesized beam is
                  10" FWHM and cell=2", try scales = [0,5,15].
                  For numerical stability, the largest scale must be smaller than the
                  image (or mask) size and smaller than or comparable to the scale
                  corresponding to the lowest measured spatial frequency (as a scale size
                  much larger than what the instrument is sensitive to is unconstrained by
                  the data, making it harder to recover from errors during the minor
                  cycle).

    nterms        Number of Taylor coefficients in the spectral model
                  - nterms=1 : Assume flat spectrum source
                  - nterms=2 : Spectrum is a straight line with a slope
                  - nterms=N : A polynomial of order N-1
                  From a Taylor expansion of the expression of a power law, the spectral
                  index is derived as alpha = taylorcoeff_1 / taylorcoeff_0
                  Spectral curvature is similarly derived when possible.
                  The optimal number of Taylor terms depends on the available signal to
                  noise ratio, bandwidth ratio, and spectral shape of the source as seen
                  by the telescope (sky spectrum x PB spectrum).
                  nterms=2 is a good starting point for wideband EVLA imaging and the
                  lower frequency bands of ALMA (when fractional bandwidth is greater than
                  10%) and if there is at least one bright source for which a dynamic
                  range of greater than a few 100 is desired.
                  Spectral artifacts for the VLA often look like spokes radiating out from
                  a bright source (i.e. in the image made with standard mfs imaging). If
                  increasing the number of terms does not eliminate these artifacts, check
                  the data for inadequate bandpass calibration. If the source is away from
                  the pointing center, consider including wide-field corrections too.
                  (Note : In addition to the output Taylor coefficient images .tt0, .tt1,
                  etc., images of spectral index (.alpha), an estimate of error on
                  spectral index (.alpha.error) and spectral curvature (.beta, if nterms
                  is greater than 2) are produced.
                  - These alpha, alpha.error and beta images contain internal T/F masks
                  based on a threshold computed as peakresidual/10. Additional masking
                  based on .alpha/.alpha.error may be desirable.
                  - .alpha.error is a purely empirical estimate derived from the
                  propagation of error during the division of two noisy numbers
                  (alpha = xx.tt1/xx.tt0) where the 'error' on tt1 and tt0 are simply the
                  values picked from the corresponding residual images. The absolute value
                  of the error is not always accurate and it is best to interpret the
                  errors across the image only in a relative sense.)

    smallscalebias
                  A numerical control to bias the scales when using multi-scale or mtmfs
                  algorithms. The peak from each scale's smoothed residual is multiplied
                  by ( 1 - smallscalebias * scale/maxscale ) to increase or decrease the
                  amplitude relative to other scales, before the scale with the largest
                  peak is chosen.
                  Smallscalebias can be varied between -1.0 and 1.0. A score of 0.0 gives
                  all scales equal weight (default). A score larger than 0.0 will bias the
                  solution towards smaller scales. A score smaller than 0.0 will bias the
                  solution towards larger scales. The effect of smallscalebias is more
                  pronounced when using multi-scale relative to mtmfs.

    restoration   Restore the model image.
                  Construct a restored image : imagename.image by convolving the model
                  image with a clean beam and adding the residual image to the result. If
                  a restoringbeam is specified, the residual image is also smoothed to
                  that target resolution before adding it in.
                  If a .model does not exist, it will make an empty one and create the
                  restored image from the residuals ( with additional smoothing if
                  needed ). With algorithm='mtmfs', this will construct Taylor coefficient
                  maps from the residuals and compute .alpha and .alpha.error.

    restoringbeam Restoring beam shape/size to use.
                  - restoringbeam='' or ['']
                    A Gaussian fitted to the PSF main lobe (separately per image plane).
                  - restoringbeam='10.0arcsec'
                    Use a circular Gaussian of this width for all planes
                  - restoringbeam=['8.0arcsec','10.0arcsec','45deg']
                    Use this elliptical Gaussian for all planes
                  - restoringbeam='common'
                    Automatically estimate a common beam shape/size appropriate for all
                    planes.
                  Note : For any restoring beam different from the native resolution, the
                  model image is convolved with the beam and added to residuals that have
                  been convolved to the same target resolution.

    pbcor         Apply PB correction to the output restored image.
                  A new image with extension .image.pbcor will be created from the
                  evaluation of .image / .pb for all pixels above the specified pblimit.
                  Note : Stand-alone PB-correction can be triggered by re-running tclean
                  with the appropriate imagename and with niter=0, calcpsf=False,
                  calcres=False, pbcor=True, vptable='vp.tab' ( where vp.tab is the name
                  of the vpmanager file. See the inline help for the 'vptable' parameter )
                  Note : Multi-term PB correction that includes a correction for the
                  spectral index of the PB has not been enabled for the 4.7 release.
                  Please use the widebandpbcor task instead. ( Wideband PB corrections are
                  required when the amplitude of the brightest source is known accurately
                  enough to be sensitive to the difference in the PB gain between the
                  upper and lower end of the band at its location. As a guideline, the
                  artificial spectral index due to the PB is -1.4 at the 0.5 gain level
                  and less than -0.2 at the 0.9 gain level at the middle frequency )

    outlierfile   Name of outlier-field image definitions
                  A text file containing sets of parameter=value pairs, one set per
                  outlier field.
                  Example : outlierfile='outs.txt'
                  Contents of outs.txt :
                      imagename=tst1
                      nchan=1
                      imsize=[80,80]
                      cell=[8.0arcsec,8.0arcsec]
                      phasecenter=J2000 19:58:40.895 +40.55.58.543
                      mask=circle[[40pix,40pix],10pix]

                      imagename=tst2
                      nchan=1
                      imsize=[100,100]
                      cell=[8.0arcsec,8.0arcsec]
                      phasecenter=J2000 19:58:40.895 +40.56.00.000
                      mask=circle[[60pix,60pix],20pix]
                  The following parameters are currently allowed to be different between
                  the main field and the outlier fields (i.e. they will be recognized if
                  found in the outlier text file). If a parameter is not listed, the value
                  is picked from what is defined in the main task input :
                      imagename, imsize, cell, phasecenter, startmodel, mask,
                      specmode, nchan, start, width, nterms, reffreq, gridder,
                      deconvolver, wprojplanes
                  Note : 'specmode' is an option, so combinations of mfs and cube for
                  different image fields, for example, are supported. 'deconvolver' and
                  'gridder' are also options that allow a different imaging or
                  deconvolution algorithm per image field. For example, multiscale with
                  wprojection and 16 w-term planes on the main field, and mtmfs with
                  nterms=3 and wprojection with 64 planes on a bright outlier source for
                  which the frequency dependence of the primary beam produces a strong
                  effect that must be modeled. The traditional alternative to this
                  approach is to first image the outlier, subtract it out of the data
                  (uvsub) and then image the main field.
                  Note : If you encounter a use-case where some other parameter needs to
                  be allowed in the outlier file (and it is logical to do so), please send
                  us feedback. The above is an initial list.

    weighting     Weighting scheme
                  (natural, uniform, briggs, superuniform, radial, briggsabs,
                  briggsbwtaper)
                  During gridding of the dirty or residual image, each visibility value is
                  multiplied by a weight before it is accumulated on the uv-grid.
                  The PSF's uv-grid is generated by gridding only the weights
                  (weightgrid).
                  weighting='natural' : Gridding weights are identical to the data weights
                       from the MS. For visibilities with similar data weights, the
                       weightgrid will follow the sample density pattern on the uv-plane.
                       This weighting scheme provides the maximum imaging sensitivity at
                       the expense of a possibly fat PSF with high sidelobes. It is most
                       appropriate for detection experiments where sensitivity is most
                       important.
                  weighting='uniform' : Gridding weights per visibility data point are the
                       original data weights divided by the total weight of all data
                       points that map to the same uv grid cell :
                       'data_weight / total_wt_per_cell'.
                       The weightgrid is as close to flat as possible, resulting in a PSF
                       with a narrow main lobe and suppressed sidelobes. However, since
                       heavily sampled areas of the uv-plane get down-weighted, the
                       imaging sensitivity is not as high as with natural weighting. It is
                       most appropriate for imaging experiments where a well behaved PSF
                       can help the reconstruction.
                  weighting='briggs' : Gridding weights per visibility data point are
                       given by 'data_weight / ( A * total_wt_per_cell + B )' where A and
                       B vary according to the 'robust' parameter.
                       robust = -2.0 maps to A=1, B=0 or uniform weighting.
                       robust = +2.0 maps to natural weighting.
                       (robust=0.5 is equivalent to robust=0.0 in AIPS IMAGR.)
                       Robust/Briggs weighting generates a PSF that can vary smoothly
                       between 'natural' and 'uniform' and allows customized trade-offs
                       between PSF shape and imaging sensitivity.
                  weighting='briggsabs' : Experimental option. Same as Briggs except the
                       formula is different: A = robust*robust and B depends on the
                       estimated noise per visibility. Giving noise='0Jy' is not a
                       reasonable option. In this mode (or formula) only robust values
                       from -2.0 to 0.0 make sense (2.0 and -2.0 will get the same
                       weighting).
                  weighting='superuniform' : This is similar to uniform weighting except
                       that the total_wt_per_cell is replaced by the
                       total_wt_within_NxN_cells around the uv cell of interest.
                       ( N = subparameter 'npixels' )
                       This method tends to give a PSF with inner sidelobes that are
                       suppressed as in uniform weighting but with far-out sidelobes
                       closer to natural weighting. The peak sensitivity is also closer to
                       natural weighting.
                  weighting='radial' : Gridding weights are given by
                       'data_weight * uvdistance'.
                       This method approximately minimizes rms sidelobes for an east-west
                       synthesis array.
                  weighting='briggsbwtaper' : A modified version of Briggs weighting for
                       cubes where an inverse uv taper, which is proportional to the
                       fractional bandwidth of the entire cube, is applied per channel.
                       The objective is to modify cube (perchanweightdensity = True)
                       imaging weights to have a similar density to that of the continuum
                       imaging weights. This is currently an experimental weighting scheme
                       being developed for ALMA.
                  For more details on weighting please see Chapter 3 of Dan Briggs'
                  thesis (http://www.aoc.nrao.edu/dissertations/dbriggs)

    robust        Robustness parameter for Briggs weighting.
                  robust = -2.0 maps to uniform weighting.
                  robust = +2.0 maps to natural weighting.
                  (robust=0.5 is equivalent to robust=0.0 in AIPS IMAGR.)

    noise         Noise parameter for briggs abs mode weighting

    npixels       Number of pixels to determine the uv-cell size for super-uniform
                  weighting (0 defaults to -/+ 3 pixels)
                  npixels -- uv-box used for weight calculation: a box going from
                  -npixel/2 to +npixel/2 on each side around a point is used to calculate
                  the weight density.
                  npixels=2 goes from -1 to +1 and covers 3 pixels on a side.
                  npixels=0 implies a single pixel, which does not make sense for
                  superuniform weighting. Therefore, if npixels=0 it will be forced to 6
                  (or a box of -3 pixels to +3 pixels) to cover 7 pixels on a side.

    uvtaper       uv-taper on outer baselines in the uv-plane
                  Apply a Gaussian taper in addition to the weighting scheme specified via
                  the 'weighting' parameter. Higher spatial frequencies are weighted down
                  relative to lower spatial frequencies to suppress artifacts arising from
                  poorly sampled areas of the uv-plane. It is equivalent to smoothing the
                  PSF obtained by other weighting schemes and can be specified either as a
                  Gaussian in uv-space (e.g. units of lambda) or as a Gaussian in the
                  image domain (e.g. angular units like arcsec).
                  uvtaper = [bmaj, bmin, bpa]
                  NOTE: the on-sky FWHM in arcsec is roughly the uv taper/200 (klambda).
                  default: uvtaper=[]; no Gaussian taper applied
                  example: uvtaper=['5klambda'] circular taper FWHM=5 kilo-lambda
                           uvtaper=['5klambda','3klambda','45.0deg']
                           uvtaper=['10arcsec'] on-sky FWHM 10 arcseconds
                           uvtaper=['300.0'] default units are lambda in the aperture plane

    niter         Maximum number of iterations
                  A stopping criterion based on the total iteration count. Currently the
                  parameter type is defined as an integer; therefore a value larger than
                  2147483647 will not be set properly, as it causes an overflow.
                  An iteration is typically defined as selecting one flux component and
                  partially subtracting it out of the residual image.
                  niter=0 : Do only the initial major cycle (make dirty image, psf, pb,
                  etc.)
                  niter larger than zero : Run major and minor cycles.
                  Note : Global stopping criteria vs major-cycle triggers
                  In addition to the global stopping criteria, the following rules are
                  used to determine when to terminate a set of minor cycle iterations and
                  trigger major cycles [derived from Cotton-Schwab Clean, 1984] :
                  'cycleniter' : controls the maximum number of iterations per image plane
                  before triggering a major cycle.
                  'cyclethreshold' : Automatically computed threshold related to the max
                  sidelobe level of the PSF and the peak residual.
                  Divergence, detected as an increase of 10% in peak residual from the
                  minimum so far (during minor cycle iterations).
                  The first criterion to be satisfied takes precedence.
                  Note : Iteration counts for cubes or multi-field images :
                  For images with multiple planes (or image fields) on which the
                  deconvolver operates in sequence, iterations are counted across all
                  planes (or image fields). The iteration count is compared with 'niter'
                  only after all channels/planes/fields have completed their minor cycles
                  and exited either due to 'cycleniter' or 'cyclethreshold'. Therefore,
                  the actual number of iterations reported in the logger can sometimes be
                  larger than the user specified value in 'niter'. For example, with
                  niter=100, cycleniter=20, nchan=10, threshold=0, a total of 200
                  iterations will be done in the first set of minor cycles before the
                  total is compared with niter=100 and it exits.
                  Note : Additional global stopping criteria include
                  - no change in the peak residual across two major cycles
                  - a 50% or more increase in the peak residual across one major cycle

    gain          Loop gain
                  Fraction of the source flux to subtract out of the residual image for
                  the CLEAN algorithm and its variants.
                  A low value (0.2 or less) is recommended when the sky brightness
                  distribution is not well represented by the basis functions used by the
                  chosen deconvolution algorithm.
                  A higher value can be tried when there is a good match between the true
                  sky brightness structure and the basis function shapes. For example, for
                  extended emission, multiscale clean with an appropriate set of scale
                  sizes will tolerate a higher loop gain than Clark clean.

    threshold     Stopping threshold (number in units of Jy, or string)
                  A global stopping threshold that the peak residual (within the clean
                  mask) across all image planes is compared to.
                  threshold = 0.005 : 5 mJy
                  threshold = '5.0mJy'
                  Note : A 'cyclethreshold' is internally computed and used as a major
                  cycle trigger. It is related to what fraction of the PSF can be reliably
                  used during minor cycle updates of the residual image. By default the
                  minor cycle iterations terminate once the peak residual reaches the
                  first sidelobe level of the brightest source.
                  'cyclethreshold' is computed as follows, using the settings in the
                  parameters 'cyclefactor', 'minpsffraction', 'maxpsffraction',
                  'threshold' :
                      psf_fraction = max_psf_sidelobe_level * 'cyclefactor'
                      psf_fraction = max(psf_fraction, 'minpsffraction')
                      psf_fraction = min(psf_fraction, 'maxpsffraction')
                      cyclethreshold = peak_residual * psf_fraction
                      cyclethreshold = max( cyclethreshold, 'threshold' )
                  If nsigma is set (>0.0), the N-sigma threshold is calculated (see the
                  description under nsigma), then cyclethreshold is further modified as :
                      cyclethreshold = max( cyclethreshold, nsigma_threshold )
                  'cyclethreshold' is made visible and editable only in the interactive
                  GUI when tclean is run with interactive=True.

    nsigma        Multiplicative factor for the rms-based threshold stopping criterion
                  The N-sigma threshold is calculated as nsigma * rms value per image
                  plane, determined from robust statistics. For nsigma > 0.0, in a minor
                  cycle, the maximum of the two values, the N-sigma threshold and
                  cyclethreshold, is used to trigger a major cycle (see also the
                  description under 'threshold'). Set nsigma=0.0 to preserve the previous
                  tclean behavior without this feature. The top level parameter fastnoise
                  is relevant for the rms noise calculation which is used to determine the
                  threshold. The parameter 'nsigma' may be an int, float, or double.

    cycleniter    Maximum number of minor-cycle iterations (per plane) before triggering a
                  major cycle
                  For example, for a single plane image, if niter=100 and cycleniter=20,
                  there will be 5 major cycles after the initial one (assuming there is no
                  threshold based stopping criterion). At each major cycle boundary, if
                  the number of iterations left over (to reach niter) is less than
                  cycleniter, it is set to the difference.
                  Note : cycleniter applies per image plane, even if cycleniter x nplanes
                  gives a total number of iterations greater than 'niter'. This is to
                  preserve consistency across image planes within one set of minor cycle
                  iterations.

    cyclefactor   Scaling on the PSF sidelobe level to compute the minor-cycle stopping
                  threshold.
                  Please refer to the Note under the documentation for 'threshold' that
                  discusses the calculation of 'cyclethreshold'.
                  cyclefactor=1.0 results in a cyclethreshold at the first sidelobe level
                  of the brightest source in the residual image before the minor cycle
                  starts.
                  cyclefactor=0.5 allows the minor cycle to go deeper.
                  cyclefactor=2.0 triggers a major cycle sooner.
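                  The 'cyclethreshold' rules quoted under 'threshold' can be written out
                  as a small sketch (a paraphrase of the documented logic, not the actual
                  CASA implementation; the default values shown here are illustrative) :
                      def compute_cyclethreshold(peak_residual, max_psf_sidelobe,
                                                 cyclefactor=1.0, minpsffraction=0.05,
                                                 maxpsffraction=0.8, threshold=0.0,
                                                 nsigma_threshold=0.0):
                          # clip the psf fraction between minpsffraction and maxpsffraction
                          psf_fraction = max_psf_sidelobe * cyclefactor
                          psf_fraction = max(psf_fraction, minpsffraction)
                          psf_fraction = min(psf_fraction, maxpsffraction)
                          ct = peak_residual * psf_fraction
                          ct = max(ct, threshold)
                          # if nsigma > 0, the N-sigma threshold also acts as a floor
                          return max(ct, nsigma_threshold)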
    minpsffraction
                  PSF fraction that marks the max depth of cleaning in the minor cycle
                  Please refer to the Note under the documentation for 'threshold' that
                  discusses the calculation of 'cyclethreshold'.
                  For example, minpsffraction=0.5 will stop cleaning at half the height of
                  the peak residual and trigger a major cycle earlier.

    maxpsffraction
                  PSF fraction that marks the minimum depth of cleaning in the minor cycle
                  Please refer to the Note under the documentation for 'threshold' that
                  discusses the calculation of 'cyclethreshold'.
                  For example, maxpsffraction=0.8 will ensure that at least the top 20
                  percent of the source will be subtracted out in the minor cycle even if
                  the first PSF sidelobe is at the 0.9 level (an extreme example), or if
                  the cyclefactor is set too high for anything to get cleaned.

    interactive   Modify masks and parameters at runtime
                  interactive=True will trigger an interactive GUI at every major cycle
                  boundary (after the major cycle and before the minor cycle). The
                  interactive mode is currently not available for parallel cube imaging
                  (please also refer to the Note under the documentation for 'parallel'
                  below).
                  Options for runtime parameter modification are :
                  Interactive clean mask : Draw a 1/0 mask (appears as a contour) by hand.
                  If a mask is supplied at the task interface or if automasking is
                  invoked, the current mask is displayed in the GUI and is available for
                  manual editing.
                  Note : If a mask contour is not visible, please check the cursor display
                  at the bottom of the GUI to see which parts of the mask image have ones
                  and zeros. If the entire mask=1, no contours will be visible.
                  Operation buttons :
                  -- Stop execution now (restore current model and exit)
                  -- Continue on until global stopping criteria are reached without
                     stopping for any more interaction
                  -- Continue with minor cycles and return for interaction after the next
                     major cycle.
                  Iteration control :
                  -- max cycleniter : Trigger for the next major cycle
                     The display begins with [ min( cycleniter, niter - itercount ) ] and
                     can be edited by hand.
                  -- iterations left : The display begins with [ niter - itercount ] and
                     can be edited to increase or decrease the total allowed niter.
                  -- threshold : Edit the global stopping threshold
                  -- cyclethreshold : The display begins with the automatically computed
                     value (see Note in help for 'threshold'), and can be edited by hand.
                  All edits will be reflected in the log messages that appear once minor
                  cycles begin.
                  [ For scripting purposes, replacing True/False with 1/0 will get tclean
                  to return an imaging summary dictionary to python ]

    usemask       Type of mask(s) to be used for deconvolution
                  user: (default) mask image(s) or user specified region file(s) or string
                      CRTF expression(s); subparameters: mask, pbmask
                  pb: primary beam mask; subparameter: pbmask
                      Example: usemask="pb", pbmask=0.2
                      Construct a mask at the 0.2 pb gain level.
                      (Currently, this option will work only with gridders that produce
                      .pb (i.e. mosaic and awproject) or if an externally produced .pb
                      image exists on disk)
                  auto-multithresh : auto-masking by multiple thresholds for
                      deconvolution; subparameters : sidelobethreshold, noisethreshold,
                      lownoisethreshold, negativethreshold, smoothfactor, minbeamfrac,
                      cutthreshold, pbmask, growiterations, dogrowprune, minpercentchange,
                      verbose
                      Additional top level parameter relevant to auto-multithresh:
                      fastnoise
                      If pbmask is >0.0, the region outside the specified pb gain level is
                      excluded from image statistics in determination of the threshold.
mask
Mask (a list of image name(s), region file(s), or region string(s))

The name of a CASA image, region file, or region string that specifies a 1/0 mask to be used for deconvolution. Only locations with value 1 will be considered for the centers of flux components in the minor cycle. If the specified regions fall completely outside of the image, tclean will throw an error.

Manual mask options/examples :

mask='xxx.mask' : Use the CASA image named xxx.mask, containing ones and zeros, as the mask. If the mask differs only in spatial coordinates from the image being made, it will be resampled to the target coordinate system before being used. The mask has to have the same shape in velocity and Stokes planes as the output image. Exceptions are single velocity and/or single Stokes plane masks; they will be expanded to cover all velocity and/or Stokes planes of the output cube.

[ Note : If an error occurs during image resampling, or if the expected mask does not appear, please try using the tasks 'imregrid' or 'makemask' to resample the mask image onto a CASA image with the target shape and coordinates, and supply it via the 'mask' parameter. ]

mask='xxx.crtf' : A text file with region strings and the following on the first line ( #CRTFv0 CASA Region Text Format version 0 ). This is the format of a file created via the viewer's region tool when saved in CASA region file format.

mask='circle[[40pix,40pix],10pix]' : A CASA region string.

mask=['xxx.mask','xxx.crtf','circle[[40pix,40pix],10pix]'] : a list of masks.

Note : Mask images for deconvolution must contain 1 or 0 in each pixel. Such a mask is different from an internal T/F mask that can be held within each CASA image. These two types of masks are not automatically interchangeable, so please use the makemask task to copy between them if you need to construct a 1/0 based mask from a T/F one.

Note : Work is in progress to generate more flexible masking options and enable more controls.

pbmask
Sub-parameter for usemask='auto-multithresh' : primary beam mask

Examples : pbmask=0.0 (default, no pb mask)
           pbmask=0.2 (construct a mask at the 0.2 pb gain level)

sidelobethreshold
Sub-parameter for "auto-multithresh" : mask threshold based on sidelobe levels :
  sidelobethreshold * max_sidelobe_level * peak residual

noisethreshold
Sub-parameter for "auto-multithresh" : mask threshold based on the noise level :
  noisethreshold * rms + location (=median)
The rms is calculated from the MAD, with rms = 1.4826*MAD.

lownoisethreshold
Sub-parameter for "auto-multithresh" : mask threshold to grow previously masked regions via binary dilation :
  lownoisethreshold * rms in residual image + location (=median)
The rms is calculated from the MAD, with rms = 1.4826*MAD.

negativethreshold
Sub-parameter for "auto-multithresh" : mask threshold for negative features :
  -1.0 * negativethreshold * rms + location (=median)
The rms is calculated from the MAD, with rms = 1.4826*MAD.
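The noise-based thresholds above all derive the rms from the median absolute deviation (MAD) of the residual image. A minimal numpy sketch of that calculation (illustrative only; tclean performs this internally, per channel plane):

  import numpy as np

  def mad_threshold(residual, factor):
      # Location estimate: the median of the residual pixels.
      location = np.median(residual)
      # Robust rms from the median absolute deviation.
      rms = 1.4826 * np.median(np.abs(residual - location))
      # e.g. factor=noisethreshold gives: noisethreshold * rms + location
      return factor * rms + location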
smoothfactor
Sub-parameter for "auto-multithresh" : smoothing factor in units of the beam

minbeamfrac
Sub-parameter for "auto-multithresh" : minimum beam fraction in size; prune masks smaller than minbeamfrac * beam
  <= 0.0 : no pruning

cutthreshold
Sub-parameter for "auto-multithresh" : threshold to cut the smoothed mask to create the final mask :
  cutthreshold * peak of the smoothed mask

growiterations
Sub-parameter for "auto-multithresh" : maximum number of iterations to perform using binary dilation for growing the mask

dogrowprune
Experimental sub-parameter for "auto-multithresh" : do pruning on the grow mask

minpercentchange
If the change in the mask size in a particular channel is less than minpercentchange, stop masking that channel in subsequent cycles. This check is applied only when the noise-based threshold is used and when the previous clean major cycle had a cyclethreshold value equal to the clean threshold. Values equal to -1.0 (or any value less than 0.0) will turn off this check (the default). Automask will still stop masking if the current channel mask is an empty mask and the noise threshold was used to determine the mask.

verbose
The summary of automasking at the end of each automasking process is printed in the logger. The following information per channel will be listed in the summary :

chan : channel number
masking? : F - stop updating automask for the subsequent iteration cycles
RMS : robust rms noise
peak : peak in the residual image
thresh_type : type of threshold used (noise or sidelobe)
thresh_value : the value of the threshold used
N_reg : number of automask regions
N_pruned : number of automask regions removed by pruning
N_grow : number of grow-mask regions
N_grow_pruned : number of grow-mask regions removed by pruning
N_neg_pix : number of pixels in negative mask regions

Note that for a large cube, the extra logging may slow down the process.

fastnoise
Only relevant when automasking (usemask='auto-multithresh') and/or an n-sigma stopping threshold (nsigma > 0.0) is used.

If set to True, a simpler but faster noise calculation is used. In this case, the threshold values are determined based on classic statistics (using all unmasked pixels for the calculations).

If set to False, a noise calculation method based on any pre-existing mask is used :
Case 1 : no existing mask
  Calculate the image statistics using the Chauvenet algorithm.
Case 2 : there is an existing mask
  Calculate the image statistics by the classical method on the region outside the mask and inside the primary beam mask.
In all cases above, the RMS noise is calculated from the MAD.

restart
Re-use existing images (and start from an existing model image), or automatically increment the image name and make a new image set.

True : Re-use existing images. If imagename.model exists, the subsequent run will start from this model (i.e. predicting it using the current gridder settings and starting from the residual image). Care must be taken when combining this option with startmodel. Currently, only one or the other can be used.

startmodel='', imagename.model exists :
  - Start from imagename.model
startmodel='xxx', imagename.model does not exist :
  - Start from startmodel
startmodel='xxx', imagename.model exists :
  - Exit with an error message requesting the user to pick only one model. This situation can arise when doing one run with startmodel='xxx' to produce an output imagename.model that includes the content of startmodel, and wanting to restart a second run to continue deconvolution. Startmodel should be set to '' before continuing.
If any change in the shape or coordinate system of the image is desired during the restart, please change the image name and use the startmodel (and mask) parameter(s) so that the old model (and mask) can be regridded to the new coordinate system before starting.

False : A convenience feature to increment imagename with '_1', '_2', etc. as suffixes so that all runs of tclean are fresh starts (without having to change the imagename parameter or delete images).

This mode will search the current directory for all existing imagename extensions, pick the maximum, and add 1. For imagename='try' it will make try.psf, try_2.psf, try_3.psf, etc.

This also works if you specify a directory name in the path : imagename='outdir/try'. If './outdir' does not exist, it will be created; then existing filenames inside that directory are searched.

If outlier fields are specified, the incrementing happens for each of them (since each has its own 'imagename'). The counters are synchronized across image fields, to make it easier to match up sets of output images. It adds 1 to the 'max id' from all outlier names on disk. So, if you do two runs with only the main field (imagename='try'), and in the third run you add an outlier with imagename='outtry', you will get the following image names for the third run : 'try_3' and 'outtry_3', even though 'outtry' and 'outtry_2' have not been used.

savemodel
Options to save model visibilities (none, virtual, modelcolumn)

Often, model visibilities must be created and saved in the MS to be later used for self-calibration (or simply to plot and view them).

none : Do not save any model visibilities in the MS. The MS is opened in readonly mode. Model visibilities can be predicted in a separate step by restarting tclean with niter=0 and savemodel='virtual' or 'modelcolumn', without changing any image names, so that it finds the .model on disk (or by changing imagename and setting startmodel to the original imagename).

virtual : In the last major cycle, save the image model and the state of the gridder used during imaging within the SOURCE subtable of the MS. Images required for de-gridding will also be stored internally. All future references to model visibilities will activate the (de)gridder to compute them on-the-fly. This mode is useful when the dataset is large enough that an additional model data column on disk would be too much extra disk I/O, and when the gridder is simple enough that on-the-fly recomputation of the model visibilities is quicker than disk I/O. Note that gridder='awproject' does not support the virtual model.

modelcolumn : In the last major cycle, save the predicted model visibilities in the MODEL_DATA column of the MS. This mode is useful when the de-gridding cost to produce the model visibilities is higher than the I/O required to read them from disk. This mode is currently required for gridder='awproject'. It is also required for the ability to later pull model visibilities out of the MS into a python array for custom processing.

Note 1 : The imagename.model image on disk will always be constructed if the minor cycle runs. This savemodel parameter applies only to model visibilities created by de-gridding the model image.

Note 2 : It is possible for an MS to have both a virtual model and a MODEL_DATA column, but under normal operation the last used mode will get triggered. Use the delmod task to clear out existing models from an MS if confusion arises.

Note 3 : When parallel=True, use savemodel='none'; the other options are not yet ready for use in parallel. If model visibilities need to be saved ('virtual' or 'modelcolumn'), please run tclean in serial mode with niter=0 after the parallel run.
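For example, the separate prediction step described under 'none' (and required after a parallel run, see Note 3) could be scripted as follows (a sketch; 'my.ms' and 'try' are placeholder names, and try.model must already exist on disk for the second call):

  # Deconvolve first, without saving model visibilities:
  tclean(vis='my.ms', imagename='try', niter=1000, savemodel='none')

  # Then restart with niter=0 (same imagename, so try.model is found
  # on disk) to predict and save the model visibilities:
  tclean(vis='my.ms', imagename='try', niter=0,
         savemodel='modelcolumn', restart=True)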
calcres
Calculate the initial residual image

This parameter controls what the first major cycle does.

calcres=False with niter greater than 0 will assume that a .residual image already exists and that the minor cycle can begin without recomputing it.

calcres=False with niter=0 implies that only the PSF will be made and no data will be gridded.

calcres=True requires that calcpsf=True, or that the .psf and .sumwt images already exist on disk (for normalization purposes).

Usage example : For large runs (or pipeline scripts) it may be useful to first run tclean with niter=0 to create an initial .residual to look at, and perhaps to make a custom mask for. Imaging can then be resumed without recomputing the residual.

calcpsf
Calculate the PSF

This parameter controls what the first major cycle does. calcpsf=False will assume that a .psf image already exists and that the minor cycle can begin without recomputing it.

psfcutoff
When the .psf image is created, a two-dimensional Gaussian is fitted to the main lobe of the PSF. Which pixels in the PSF are fitted is determined by psfcutoff. The default value of psfcutoff is 0.35, and it can be varied from 0.01 to 0.99.

Fitting algorithm :
- A region of 41 x 41 pixels around the peak of the PSF is compared against the psfcutoff. Sidelobes are ignored by radially searching from the PSF peak.
- Calculate the bottom left corner (blc) and top right corner (trc) from those points. Expand blc and trc by a number of pixels (5).
- Create a new sub-matrix from blc and trc.
- Interpolate the matrix to a target number of points (3001) using a CUBIC spline.
- All the non-sidelobe points in the interpolated matrix that are above the psfcutoff are used to fit a Gaussian. A Levenberg-Marquardt algorithm is used.
- If the fit fails, the algorithm is repeated with the psfcutoff decreased (psfcutoff = psfcutoff/1.5). A message will appear in the log along with the new value of psfcutoff. This is done up to 50 times if the fitting keeps failing.

This Gaussian beam is defined by a major axis, minor axis, and position angle. During the restoration process, this Gaussian beam is used as the clean beam. Varying psfcutoff might be useful for producing a better fit for highly non-Gaussian PSFs; however, the resulting fits should be carefully checked. This parameter should rarely be changed. (This is not the support size for Clark clean.)

parallel
Run major cycles in parallel (this feature is experimental)

Parallel tclean will run only if casa has already been started using mpirun. Please refer to the HPC documentation for details on how to start this on your system.
Example : mpirun -n 3 -xterm 0 `which casa` Continuum Imaging : - Data are partitioned (in time) into NProc pieces - Gridding/iFT is done separately per partition - Images (and weights) are gathered and then normalized - One non-parallel minor cycle is run - Model image is scattered to all processes - Major cycle is done in parallel per partition Cube Imaging : - Data and Image coordinates are partitioned (in freq) into NProc pieces - Each partition is processed independently (major and minor cycles) - All processes are synchronized at major cycle boundaries for convergence checks - At the end, cubes from all partitions are concatenated along the spectral axis Note 1 : Iteration control for cube imaging is independent per partition. - There is currently no communication between them to synchronize information such as peak residual and cyclethreshold. Therefore, different chunks may trigger major cycles at different levels. - For cube imaging in parallel, there is currently no interactive masking. (Proper synchronization of iteration control is work in progress.) RETURNS void --------- examples ----------------------------------------------------------- This is the first release of our refactored imager code. Although most features have been used and validated, there are many details that have not been thoroughly tested. Feedback will be much appreciated. Usage Examples : ----------------------- (A) A suite of test programs that demo all usable modes of tclean on small test datasets https://svn.cv.nrao.edu/svn/casa/branches/release-4_5/gcwrap/python/scripts/tests/test_refimager.py (B) A set of demo examples for ALMA imaging https://casaguides.nrao.edu/index.php/TCLEAN_and_ALMA """ _info_group_ = """imaging""" _info_desc_ = """Parallelized tclean in consecutive time steps""" __schema = {'vis': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'imageprefix': {'type': 'cStr', 'coerce': _coerce.to_str}, 'imagesuffix': {'type': 'cStr', 'coerce': _coerce.to_str}, 'ncpu': {'type': 'cInt'}, 'twidth': {'type': 'cInt'}, 'doreg': {'type': 'cBool'}, 'usephacenter': {'type': 'cBool'}, 'reftime': {'type': 'cStr', 'coerce': _coerce.to_str}, 'toTb': {'type': 'cBool'}, 'sclfactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'subregion': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'docompress': {'type': 'cBool'}, 'overwrite': {'type': 'cBool'}, 'selectdata': {'type': 'cBool'}, 'field': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'spw': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'timerange': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'uvrange': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'antenna': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'scan': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'observation': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cInt'}]}, 'intent': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 
'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'datacolumn': {'type': 'cStr', 'coerce': _coerce.to_str}, 'imagename': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'imsize': {'anyof': [{'type': 'cInt'}, {'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}]}, 'cell': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cFloat', 'coerce': _coerce.to_float}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}, {'type': 'cInt'}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'phasecenter': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}]}, 'stokes': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'I', 'IQUV', 'UV', 'RRLL', 'IQ', 'V', 'pseudoI', 'QU', 'YY', 'RR', 'Q', 'U', 'IV', 'XX', 'XXYY', 'LL' ]}, 'projection': {'type': 'cStr', 'coerce': _coerce.to_str}, 'startmodel': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'specmode': {'type': 'cVariant', 'coerce': [_coerce.to_variant] # <allowed> IS NOT ALLOWED FOR A PARAMETER OF TYPE any }, 'reffreq': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'nchan': {'type': 'cInt'}, 'start': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'width': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'outframe': {'type': 'cStr', 'coerce': _coerce.to_str}, 'veltype': {'type': 'cStr', 'coerce': _coerce.to_str}, 'restfreq': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'interpolation': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'nearest', 'linear', 'cubic' ]}, 'perchanweightdensity': {'type': 'cBool'}, 'gridder': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'widefield', 'wproject', 'imagemosaic', 'standard', 'awproject', 'wprojectft', 'mosaicft', 'ft', 'ftmosaic', 'mosaic', 'awprojectft', 'gridft' ]}, 'facets': {'type': 'cInt'}, 'psfphasecenter': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}]}, 'wprojplanes': {'type': 'cInt'}, 'vptable': {'type': 'cStr', 'coerce': _coerce.to_str}, 'mosweight': {'type': 'cBool'}, 'aterm': {'type': 'cBool'}, 'psterm': {'type': 'cBool'}, 'wbawp': {'type': 'cBool'}, 'conjbeams': {'type': 'cBool'}, 'cfcache': {'type': 'cStr', 'coerce': _coerce.to_str}, 'usepointing': {'type': 'cBool'}, 'computepastep': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'rotatepastep': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'pointingoffsetsigdev': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'pblimit': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'normtype': {'type': 'cStr', 'coerce': _coerce.to_str}, 'deconvolver': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'clarkstokes_exp', 'mtmfs', 'mem', 'clarkstokes', 'hogbom', 'clark_exp', 'clark', 'multiscale' ]}, 'scales': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'nterms': {'type': 'cInt'}, 'smallscalebias': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'restoration': {'type': 'cBool'}, 'restoringbeam': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'pbcor': {'type': 'cBool'}, 'outlierfile': {'type': 'cStr', 'coerce': _coerce.to_str}, 'weighting': 
{'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'briggsabs', 'briggs', 'briggsbwtaper', 'natural', 'radial', 'superuniform', 'uniform' ]}, 'robust': {'type': 'cFloat', 'coerce': _coerce.to_float, 'min': -2.0, 'max': 2.0}, 'noise': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'npixels': {'type': 'cInt'}, 'uvtaper': {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}, 'niter': {'type': 'cInt'}, 'gain': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'threshold': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'nsigma': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'cycleniter': {'type': 'cInt'}, 'cyclefactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'minpsffraction': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'maxpsffraction': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'interactive': {'anyof': [{'type': 'cBool'}, {'type': 'cInt'}]}, 'usemask': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'user', 'pb', 'auto-multithresh' ]}, 'mask': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'pbmask': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'sidelobethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'noisethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'lownoisethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'negativethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'smoothfactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'minbeamfrac': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'cutthreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'growiterations': {'type': 'cInt'}, 'dogrowprune': {'type': 'cBool'}, 'minpercentchange': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'verbose': {'type': 'cBool'}, 'fastnoise': {'type': 'cBool'}, 'restart': {'type': 'cBool'}, 'savemodel': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'none', 'virtual', 'modelcolumn' ]}, 'calcres': {'type': 'cBool'}, 'calcpsf': {'type': 'cBool'}, 'psfcutoff': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'parallel': {'type': 'cBool'}} def __init__(self): self.__stdout = None self.__stderr = None self.__root_frame_ = None def __globals_(self): if self.__root_frame_ is None: self.__root_frame_ = _find_frame( ) assert self.__root_frame_ is not None, "could not find CASAshell global frame" return self.__root_frame_ def __to_string_(self,value): if type(value) is str: return "'%s'" % value else: return str(value) def __validate_(self,doc,schema): return _pc.validate(doc,schema) def __do_inp_output(self,param_prefix,description_str,formatting_chars): out = self.__stdout or sys.stdout description = description_str.split( ) prefix_width = 23 + 23 + 4 output = [ ] addon = '' first_addon = True while len(description) > 0: ## starting a new line..................................................................... if len(output) == 0: ## for first line add parameter information............................................ if len(param_prefix)-formatting_chars > prefix_width - 1: output.append(param_prefix) continue addon = param_prefix + ' #' first_addon = True addon_formatting = formatting_chars else: ## for subsequent lines space over prefix width........................................ addon = (' ' * prefix_width) + '#' first_addon = False addon_formatting = 0 ## if first word of description puts us over the screen width, bail........................ 
if len(addon + description[0]) - addon_formatting + 1 > self.term_width: ## if we're doing the first line make sure it's output................................. if first_addon: output.append(addon) break while len(description) > 0: ## if the next description word puts us over break for the next line................... if len(addon + description[0]) - addon_formatting + 1 > self.term_width: break addon = addon + ' ' + description[0] description.pop(0) output.append(addon) out.write('\n'.join(output) + '\n') #--------- return nonsubparam values ---------------------------------------------- def __phasecenter_dflt( self, glb ): return '' def __phasecenter( self, glb ): if 'phasecenter' in glb: return glb['phasecenter'] return '' def __projection_dflt( self, glb ): return 'SIN' def __projection( self, glb ): if 'projection' in glb: return glb['projection'] return 'SIN' def __vis_dflt( self, glb ): return '' def __vis( self, glb ): if 'vis' in glb: return glb['vis'] return '' def __imagesuffix_dflt( self, glb ): return '' def __imagesuffix( self, glb ): if 'imagesuffix' in glb: return glb['imagesuffix'] return '' def __parallel_dflt( self, glb ): return False def __parallel( self, glb ): if 'parallel' in glb: return glb['parallel'] return False def __twidth_dflt( self, glb ): return int(1) def __twidth( self, glb ): if 'twidth' in glb: return glb['twidth'] return int(1) def __datacolumn_dflt( self, glb ): return 'corrected' def __datacolumn( self, glb ): if 'datacolumn' in glb: return glb['datacolumn'] return 'corrected' def __restart_dflt( self, glb ): return True def __restart( self, glb ): if 'restart' in glb: return glb['restart'] return True def __cell_dflt( self, glb ): return [ ] def __cell( self, glb ): if 'cell' in glb: return glb['cell'] return [ ] def __startmodel_dflt( self, glb ): return '' def __startmodel( self, glb ): if 'startmodel' in glb: return glb['startmodel'] return '' def __deconvolver_dflt( self, glb ): return 'hogbom' def __deconvolver( self, glb ): if 'deconvolver' in glb: return glb['deconvolver'] return 'hogbom' def __imsize_dflt( self, glb ): return [ int(100) ] def __imsize( self, glb ): if 'imsize' in glb: return glb['imsize'] return [ int(100) ] def __calcpsf_dflt( self, glb ): return True def __calcpsf( self, glb ): if 'calcpsf' in glb: return glb['calcpsf'] return True def __niter_dflt( self, glb ): return int(0) def __niter( self, glb ): if 'niter' in glb: return glb['niter'] return int(0) def __selectdata_dflt( self, glb ): return True def __selectdata( self, glb ): if 'selectdata' in glb: return glb['selectdata'] return True def __imageprefix_dflt( self, glb ): return '' def __imageprefix( self, glb ): if 'imageprefix' in glb: return glb['imageprefix'] return '' def __outlierfile_dflt( self, glb ): return '' def __outlierfile( self, glb ): if 'outlierfile' in glb: return glb['outlierfile'] return '' def __calcres_dflt( self, glb ): return True def __calcres( self, glb ): if 'calcres' in glb: return glb['calcres'] return True def __ncpu_dflt( self, glb ): return int(8) def __ncpu( self, glb ): if 'ncpu' in glb: return glb['ncpu'] return int(8) def __savemodel_dflt( self, glb ): return 'none' def __savemodel( self, glb ): if 'savemodel' in glb: return glb['savemodel'] return 'none' def __usemask_dflt( self, glb ): return 'user' def __usemask( self, glb ): if 'usemask' in glb: return glb['usemask'] return 'user' def __specmode_dflt( self, glb ): return 'mfs' def __specmode( self, glb ): if 'specmode' in glb: return glb['specmode'] return 'mfs' def 
__restoration_dflt( self, glb ): return True def __restoration( self, glb ): if 'restoration' in glb: return glb['restoration'] return True def __stokes_dflt( self, glb ): return 'I' def __stokes( self, glb ): if 'stokes' in glb: return glb['stokes'] return 'I' def __fastnoise_dflt( self, glb ): return True def __fastnoise( self, glb ): if 'fastnoise' in glb: return glb['fastnoise'] return True def __imagename_dflt( self, glb ): return '' def __imagename( self, glb ): if 'imagename' in glb: return glb['imagename'] return '' def __weighting_dflt( self, glb ): return 'natural' def __weighting( self, glb ): if 'weighting' in glb: return glb['weighting'] return 'natural' def __gridder_dflt( self, glb ): return 'standard' def __gridder( self, glb ): if 'gridder' in glb: return glb['gridder'] return 'standard' def __overwrite_dflt( self, glb ): return False def __overwrite( self, glb ): if 'overwrite' in glb: return glb['overwrite'] return False def __doreg_dflt( self, glb ): return False def __doreg( self, glb ): if 'doreg' in glb: return glb['doreg'] return False #--------- return inp/go default -------------------------------------------------- def __antenna_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __smoothfactor_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(1.0) return None def __negativethreshold_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(0.0) return None def __minbeamfrac_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(0.3) return None def __psfphasecenter_dflt( self, glb ): if self.__gridder( glb ) == "mosaic": return "" if self.__gridder( glb ) == "mosaicft": return "" return None def __mask_dflt( self, glb ): if self.__usemask( glb ) == "user": return "" return None def __sclfactor_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return float(1.0) return None def __field_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __cutthreshold_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(0.01) return None def __pblimit_dflt( self, glb ): if self.__gridder( glb ) == "standard": return float(0.2) if self.__gridder( glb ) == "widefield": return float(0.2) if self.__gridder( glb ) == "wproject": return float(0.2) if self.__gridder( glb ) == "wprojectft": return float(0.2) if self.__gridder( glb ) == "mosaic": return float(0.2) if self.__gridder( glb ) == "mosaicft": return float(0.2) if self.__gridder( glb ) == "ftmosaic": return float(0.2) if self.__gridder( glb ) == "imagemosaic": return float(0.2) if self.__gridder( glb ) == "awproject": return float(0.2) if self.__gridder( glb ) == "awprojectft": return float(0.2) return None def __smallscalebias_dflt( self, glb ): if self.__deconvolver( glb ) == "multiscale": return float(0.0) if self.__deconvolver( glb ) == "mtmfs": return float(0.0) return None def __maxpsffraction_dflt( self, glb ): if self.__niter( glb ) != int(0): return float(0.8) return None def __verbose_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return bool(False) return None def __intent_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __noise_dflt( self, glb ): if self.__weighting( glb ) == "briggsabs": return "1.0Jy" return None def __interpolation_dflt( self, glb ): if self.__specmode( glb ) == "cube": return "linear" if self.__specmode( glb ) == "cubesource": return "linear" if 
self.__specmode( glb ) == "cubedata": return "linear" return None def __subregion_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return "" return None def __nterms_dflt( self, glb ): if self.__deconvolver( glb ) == "mtmfs": return int(2) return None def __pointingoffsetsigdev_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return [] if self.__gridder( glb ) == "awprojectft": return [] return None def __nchan_dflt( self, glb ): if self.__specmode( glb ) == "cube": return int(-1) if self.__specmode( glb ) == "cubesource": return int(-1) if self.__specmode( glb ) == "cubedata": return int(-1) return None def __reffreq_dflt( self, glb ): if self.__specmode( glb ) == "mfs": return "" return None def __conjbeams_dflt( self, glb ): if self.__gridder( glb ) == "mosaic": return bool(False) if self.__gridder( glb ) == "mosaicft": return bool(False) if self.__gridder( glb ) == "awproject": return bool(False) if self.__gridder( glb ) == "awprojectft": return bool(False) return None def __restoringbeam_dflt( self, glb ): if self.__restoration( glb ) == bool(True): return [] return None def __sidelobethreshold_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(3.0) return None def __reftime_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return "" return None def __cycleniter_dflt( self, glb ): if self.__niter( glb ) != int(0): return int(-1) return None def __minpsffraction_dflt( self, glb ): if self.__niter( glb ) != int(0): return float(0.05) return None def __scan_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __computepastep_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return float(360.0) if self.__gridder( glb ) == "awprojectft": return float(360.0) return None def __minpercentchange_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(-1.0) return None def __wbawp_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return bool(True) if self.__gridder( glb ) == "awprojectft": return bool(True) return None def __docompress_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return bool(False) return None def __interactive_dflt( self, glb ): if self.__niter( glb ) != int(0): return False return None def __npixels_dflt( self, glb ): if self.__weighting( glb ) == "briggs": return int(0) if self.__weighting( glb ) == "briggsabs": return int(0) return None def __mosweight_dflt( self, glb ): if self.__gridder( glb ) == "mosaic": return bool(True) if self.__gridder( glb ) == "ftmosaic": return bool(True) if self.__gridder( glb ) == "awproject": return bool(False) if self.__gridder( glb ) == "awprojectft": return bool(False) return None def __pbcor_dflt( self, glb ): if self.__restoration( glb ) == bool(True): return bool(False) return None def __normtype_dflt( self, glb ): if self.__gridder( glb ) == "mosaic": return "flatnoise" if self.__gridder( glb ) == "mosaicft": return "flatnoise" if self.__gridder( glb ) == "ftmosaic": return "flatnoise" if self.__gridder( glb ) == "imagemosaic": return "flatnoise" if self.__gridder( glb ) == "awproject": return "flatnoise" if self.__gridder( glb ) == "awprojectft": return "flatnoise" return None def __uvtaper_dflt( self, glb ): if self.__weighting( glb ) == "natural": return [] if self.__weighting( glb ) == "briggs": return [] if self.__weighting( glb ) == "briggsabs": return [] if self.__weighting( glb ) == "briggsbwtaper": return [] return None def __cyclefactor_dflt( self, glb ): if self.__niter( glb ) != 
int(0): return float(1.0) return None def __toTb_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return bool(False) return None def __restfreq_dflt( self, glb ): if self.__specmode( glb ) == "cube": return [] if self.__specmode( glb ) == "cubesource": return [] if self.__specmode( glb ) == "cubedata": return [] return None def __pbmask_dflt( self, glb ): if self.__usemask( glb ) == "user": return float(0.0) if self.__usemask( glb ) == "pb": return float(0.2) if self.__usemask( glb ) == "auto-multithresh": return float(0.2) return None def __growiterations_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return int(75) return None def __gain_dflt( self, glb ): if self.__niter( glb ) != int(0): return float(0.1) return None def __scales_dflt( self, glb ): if self.__deconvolver( glb ) == "multiscale": return [] if self.__deconvolver( glb ) == "mtmfs": return [] return None def __psfcutoff_dflt( self, glb ): if self.__calcpsf( glb ) == bool(True): return float(0.35) return None def __robust_dflt( self, glb ): if self.__weighting( glb ) == "briggs": return float(0.5) if self.__weighting( glb ) == "briggsabs": return float(0.5) if self.__weighting( glb ) == "briggsbwtaper": return float(0.5) return None def __vptable_dflt( self, glb ): if self.__gridder( glb ) == "standard": return "" if self.__gridder( glb ) == "widefield": return "" if self.__gridder( glb ) == "wproject": return "" if self.__gridder( glb ) == "wprojectft": return "" if self.__gridder( glb ) == "mosaic": return "" if self.__gridder( glb ) == "mosaicft": return "" if self.__gridder( glb ) == "ftmosaic": return "" if self.__gridder( glb ) == "imagemosaic": return "" return None def __perchanweightdensity_dflt( self, glb ): if self.__specmode( glb ) == "cube": return bool(True) if self.__specmode( glb ) == "cubesource": return bool(True) if self.__specmode( glb ) == "cubedata": return bool(False) return None def __aterm_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return bool(True) if self.__gridder( glb ) == "awprojectft": return bool(True) return None def __usephacenter_dflt( self, glb ): if self.__doreg( glb ) == bool(True): return bool(True) return None def __usepointing_dflt( self, glb ): if self.__gridder( glb ) == "mosaic": return bool(False) if self.__gridder( glb ) == "mosaicft": return bool(False) if self.__gridder( glb ) == "ftmosaic": return bool(False) if self.__gridder( glb ) == "awproject": return bool(False) if self.__gridder( glb ) == "awprojectft": return bool(False) return None def __rotatepastep_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return float(360.0) if self.__gridder( glb ) == "awprojectft": return float(360.0) return None def __threshold_dflt( self, glb ): if self.__niter( glb ) != int(0): return 0.0 return None def __veltype_dflt( self, glb ): if self.__specmode( glb ) == "cube": return "radio" if self.__specmode( glb ) == "cubesource": return "radio" if self.__specmode( glb ) == "cubedata": return "radio" return None def __outframe_dflt( self, glb ): if self.__specmode( glb ) == "cube": return "" if self.__specmode( glb ) == "cubesource": return "REST" return None def __dogrowprune_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return bool(True) return None def __uvrange_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __psterm_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return bool(False) if self.__gridder( glb ) == "awprojectft": return bool(False) return None def 
__start_dflt( self, glb ): if self.__specmode( glb ) == "cube": return "" if self.__specmode( glb ) == "cubesource": return "" if self.__specmode( glb ) == "cubedata": return "" return None def __observation_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __lownoisethreshold_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(1.5) return None def __facets_dflt( self, glb ): if self.__gridder( glb ) == "widefield": return int(1) return None def __noisethreshold_dflt( self, glb ): if self.__usemask( glb ) == "auto-multithresh": return float(5.0) return None def __width_dflt( self, glb ): if self.__specmode( glb ) == "cube": return "" if self.__specmode( glb ) == "cubesource": return "" if self.__specmode( glb ) == "cubedata": return "" return None def __spw_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __timerange_dflt( self, glb ): if self.__selectdata( glb ) == bool(True): return "" return None def __nsigma_dflt( self, glb ): if self.__niter( glb ) != int(0): return float(0.0) return None def __cfcache_dflt( self, glb ): if self.__gridder( glb ) == "awproject": return "" if self.__gridder( glb ) == "awprojectft": return "" return None def __wprojplanes_dflt( self, glb ): if self.__gridder( glb ) == "widefield": return int(1) if self.__gridder( glb ) == "wproject": return int(1) if self.__gridder( glb ) == "wprojectft": return int(1) if self.__gridder( glb ) == "imagemosaic": return int(1) if self.__gridder( glb ) == "awproject": return int(1) if self.__gridder( glb ) == "awprojectft": return int(1) return None #--------- return subparam values ------------------------------------------------- def __usephacenter( self, glb ): if 'usephacenter' in glb: return glb['usephacenter'] dflt = self.__usephacenter_dflt( glb ) if dflt is not None: return dflt return True def __reftime( self, glb ): if 'reftime' in glb: return glb['reftime'] dflt = self.__reftime_dflt( glb ) if dflt is not None: return dflt return '' def __toTb( self, glb ): if 'toTb' in glb: return glb['toTb'] dflt = self.__toTb_dflt( glb ) if dflt is not None: return dflt return False def __sclfactor( self, glb ): if 'sclfactor' in glb: return glb['sclfactor'] dflt = self.__sclfactor_dflt( glb ) if dflt is not None: return dflt return float(1.0) def __subregion( self, glb ): if 'subregion' in glb: return glb['subregion'] dflt = self.__subregion_dflt( glb ) if dflt is not None: return dflt return '' def __docompress( self, glb ): if 'docompress' in glb: return glb['docompress'] dflt = self.__docompress_dflt( glb ) if dflt is not None: return dflt return False def __field( self, glb ): if 'field' in glb: return glb['field'] dflt = self.__field_dflt( glb ) if dflt is not None: return dflt return '' def __spw( self, glb ): if 'spw' in glb: return glb['spw'] dflt = self.__spw_dflt( glb ) if dflt is not None: return dflt return '' def __timerange( self, glb ): if 'timerange' in glb: return glb['timerange'] dflt = self.__timerange_dflt( glb ) if dflt is not None: return dflt return '' def __uvrange( self, glb ): if 'uvrange' in glb: return glb['uvrange'] dflt = self.__uvrange_dflt( glb ) if dflt is not None: return dflt return '' def __antenna( self, glb ): if 'antenna' in glb: return glb['antenna'] dflt = self.__antenna_dflt( glb ) if dflt is not None: return dflt return '' def __scan( self, glb ): if 'scan' in glb: return glb['scan'] dflt = self.__scan_dflt( glb ) if dflt is not None: return dflt return '' def __observation( self, 
glb ): if 'observation' in glb: return glb['observation'] dflt = self.__observation_dflt( glb ) if dflt is not None: return dflt return '' def __intent( self, glb ): if 'intent' in glb: return glb['intent'] dflt = self.__intent_dflt( glb ) if dflt is not None: return dflt return '' def __reffreq( self, glb ): if 'reffreq' in glb: return glb['reffreq'] dflt = self.__reffreq_dflt( glb ) if dflt is not None: return dflt return '' def __nchan( self, glb ): if 'nchan' in glb: return glb['nchan'] dflt = self.__nchan_dflt( glb ) if dflt is not None: return dflt return int(-1) def __start( self, glb ): if 'start' in glb: return glb['start'] dflt = self.__start_dflt( glb ) if dflt is not None: return dflt return '' def __width( self, glb ): if 'width' in glb: return glb['width'] dflt = self.__width_dflt( glb ) if dflt is not None: return dflt return '' def __outframe( self, glb ): if 'outframe' in glb: return glb['outframe'] dflt = self.__outframe_dflt( glb ) if dflt is not None: return dflt return 'LSRK' def __veltype( self, glb ): if 'veltype' in glb: return glb['veltype'] dflt = self.__veltype_dflt( glb ) if dflt is not None: return dflt return 'radio' def __restfreq( self, glb ): if 'restfreq' in glb: return glb['restfreq'] dflt = self.__restfreq_dflt( glb ) if dflt is not None: return dflt return [ ] def __interpolation( self, glb ): if 'interpolation' in glb: return glb['interpolation'] dflt = self.__interpolation_dflt( glb ) if dflt is not None: return dflt return 'linear' def __perchanweightdensity( self, glb ): if 'perchanweightdensity' in glb: return glb['perchanweightdensity'] dflt = self.__perchanweightdensity_dflt( glb ) if dflt is not None: return dflt return True def __facets( self, glb ): if 'facets' in glb: return glb['facets'] dflt = self.__facets_dflt( glb ) if dflt is not None: return dflt return int(1) def __psfphasecenter( self, glb ): if 'psfphasecenter' in glb: return glb['psfphasecenter'] dflt = self.__psfphasecenter_dflt( glb ) if dflt is not None: return dflt return '' def __wprojplanes( self, glb ): if 'wprojplanes' in glb: return glb['wprojplanes'] dflt = self.__wprojplanes_dflt( glb ) if dflt is not None: return dflt return int(1) def __vptable( self, glb ): if 'vptable' in glb: return glb['vptable'] dflt = self.__vptable_dflt( glb ) if dflt is not None: return dflt return '' def __mosweight( self, glb ): if 'mosweight' in glb: return glb['mosweight'] dflt = self.__mosweight_dflt( glb ) if dflt is not None: return dflt return True def __aterm( self, glb ): if 'aterm' in glb: return glb['aterm'] dflt = self.__aterm_dflt( glb ) if dflt is not None: return dflt return True def __psterm( self, glb ): if 'psterm' in glb: return glb['psterm'] dflt = self.__psterm_dflt( glb ) if dflt is not None: return dflt return False def __wbawp( self, glb ): if 'wbawp' in glb: return glb['wbawp'] dflt = self.__wbawp_dflt( glb ) if dflt is not None: return dflt return True def __conjbeams( self, glb ): if 'conjbeams' in glb: return glb['conjbeams'] dflt = self.__conjbeams_dflt( glb ) if dflt is not None: return dflt return False def __cfcache( self, glb ): if 'cfcache' in glb: return glb['cfcache'] dflt = self.__cfcache_dflt( glb ) if dflt is not None: return dflt return '' def __usepointing( self, glb ): if 'usepointing' in glb: return glb['usepointing'] dflt = self.__usepointing_dflt( glb ) if dflt is not None: return dflt return False def __computepastep( self, glb ): if 'computepastep' in glb: return glb['computepastep'] dflt = self.__computepastep_dflt( glb ) if dflt is not None: 
return dflt return float(360.0) def __rotatepastep( self, glb ): if 'rotatepastep' in glb: return glb['rotatepastep'] dflt = self.__rotatepastep_dflt( glb ) if dflt is not None: return dflt return float(360.0) def __pointingoffsetsigdev( self, glb ): if 'pointingoffsetsigdev' in glb: return glb['pointingoffsetsigdev'] dflt = self.__pointingoffsetsigdev_dflt( glb ) if dflt is not None: return dflt return [ ] def __pblimit( self, glb ): if 'pblimit' in glb: return glb['pblimit'] dflt = self.__pblimit_dflt( glb ) if dflt is not None: return dflt return float(0.2) def __normtype( self, glb ): if 'normtype' in glb: return glb['normtype'] dflt = self.__normtype_dflt( glb ) if dflt is not None: return dflt return 'flatnoise' def __scales( self, glb ): if 'scales' in glb: return glb['scales'] dflt = self.__scales_dflt( glb ) if dflt is not None: return dflt return [ ] def __nterms( self, glb ): if 'nterms' in glb: return glb['nterms'] dflt = self.__nterms_dflt( glb ) if dflt is not None: return dflt return int(2) def __smallscalebias( self, glb ): if 'smallscalebias' in glb: return glb['smallscalebias'] dflt = self.__smallscalebias_dflt( glb ) if dflt is not None: return dflt return float(0.0) def __restoringbeam( self, glb ): if 'restoringbeam' in glb: return glb['restoringbeam'] dflt = self.__restoringbeam_dflt( glb ) if dflt is not None: return dflt return [ ] def __pbcor( self, glb ): if 'pbcor' in glb: return glb['pbcor'] dflt = self.__pbcor_dflt( glb ) if dflt is not None: return dflt return False def __robust( self, glb ): if 'robust' in glb: return glb['robust'] dflt = self.__robust_dflt( glb ) if dflt is not None: return dflt return float(0.5) def __noise( self, glb ): if 'noise' in glb: return glb['noise'] dflt = self.__noise_dflt( glb ) if dflt is not None: return dflt return '1.0Jy' def __npixels( self, glb ): if 'npixels' in glb: return glb['npixels'] dflt = self.__npixels_dflt( glb ) if dflt is not None: return dflt return int(0) def __uvtaper( self, glb ): if 'uvtaper' in glb: return glb['uvtaper'] dflt = self.__uvtaper_dflt( glb ) if dflt is not None: return dflt return [ '' ] def __gain( self, glb ): if 'gain' in glb: return glb['gain'] dflt = self.__gain_dflt( glb ) if dflt is not None: return dflt return float(0.1) def __threshold( self, glb ): if 'threshold' in glb: return glb['threshold'] dflt = self.__threshold_dflt( glb ) if dflt is not None: return dflt return float(0.0) def __nsigma( self, glb ): if 'nsigma' in glb: return glb['nsigma'] dflt = self.__nsigma_dflt( glb ) if dflt is not None: return dflt return float(0.0) def __cycleniter( self, glb ): if 'cycleniter' in glb: return glb['cycleniter'] dflt = self.__cycleniter_dflt( glb ) if dflt is not None: return dflt return int(-1) def __cyclefactor( self, glb ): if 'cyclefactor' in glb: return glb['cyclefactor'] dflt = self.__cyclefactor_dflt( glb ) if dflt is not None: return dflt return float(1.0) def __minpsffraction( self, glb ): if 'minpsffraction' in glb: return glb['minpsffraction'] dflt = self.__minpsffraction_dflt( glb ) if dflt is not None: return dflt return float(0.05) def __maxpsffraction( self, glb ): if 'maxpsffraction' in glb: return glb['maxpsffraction'] dflt = self.__maxpsffraction_dflt( glb ) if dflt is not None: return dflt return float(0.8) def __interactive( self, glb ): if 'interactive' in glb: return glb['interactive'] dflt = self.__interactive_dflt( glb ) if dflt is not None: return dflt return False def __mask( self, glb ): if 'mask' in glb: return glb['mask'] dflt = self.__mask_dflt( glb ) if 
dflt is not None: return dflt return '' def __pbmask( self, glb ): if 'pbmask' in glb: return glb['pbmask'] dflt = self.__pbmask_dflt( glb ) if dflt is not None: return dflt return float(0.0) def __sidelobethreshold( self, glb ): if 'sidelobethreshold' in glb: return glb['sidelobethreshold'] dflt = self.__sidelobethreshold_dflt( glb ) if dflt is not None: return dflt return float(3.0) def __noisethreshold( self, glb ): if 'noisethreshold' in glb: return glb['noisethreshold'] dflt = self.__noisethreshold_dflt( glb ) if dflt is not None: return dflt return float(5.0) def __lownoisethreshold( self, glb ): if 'lownoisethreshold' in glb: return glb['lownoisethreshold'] dflt = self.__lownoisethreshold_dflt( glb ) if dflt is not None: return dflt return float(1.5) def __negativethreshold( self, glb ): if 'negativethreshold' in glb: return glb['negativethreshold'] dflt = self.__negativethreshold_dflt( glb ) if dflt is not None: return dflt return float(0.0) def __smoothfactor( self, glb ): if 'smoothfactor' in glb: return glb['smoothfactor'] dflt = self.__smoothfactor_dflt( glb ) if dflt is not None: return dflt return float(1.0) def __minbeamfrac( self, glb ): if 'minbeamfrac' in glb: return glb['minbeamfrac'] dflt = self.__minbeamfrac_dflt( glb ) if dflt is not None: return dflt return float(0.3) def __cutthreshold( self, glb ): if 'cutthreshold' in glb: return glb['cutthreshold'] dflt = self.__cutthreshold_dflt( glb ) if dflt is not None: return dflt return float(0.01) def __growiterations( self, glb ): if 'growiterations' in glb: return glb['growiterations'] dflt = self.__growiterations_dflt( glb ) if dflt is not None: return dflt return int(75) def __dogrowprune( self, glb ): if 'dogrowprune' in glb: return glb['dogrowprune'] dflt = self.__dogrowprune_dflt( glb ) if dflt is not None: return dflt return True def __minpercentchange( self, glb ): if 'minpercentchange' in glb: return glb['minpercentchange'] dflt = self.__minpercentchange_dflt( glb ) if dflt is not None: return dflt return float(-1.0) def __verbose( self, glb ): if 'verbose' in glb: return glb['verbose'] dflt = self.__verbose_dflt( glb ) if dflt is not None: return dflt return False def __psfcutoff( self, glb ): if 'psfcutoff' in glb: return glb['psfcutoff'] dflt = self.__psfcutoff_dflt( glb ) if dflt is not None: return dflt return float(0.35) #--------- subparam inp output ---------------------------------------------------- def __vis_inp(self): description = 'Name of input visibility file(s)' value = self.__vis( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'vis': value},{'vis': self.__schema['vis']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('vis',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __imageprefix_inp(self): description = '' value = self.__imageprefix( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'imageprefix': value},{'imageprefix': self.__schema['imageprefix']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imageprefix',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __imagesuffix_inp(self): description = '' value = self.__imagesuffix( self.__globals_( ) ) (pre,post) = ('','') if self.__validate_({'imagesuffix': value},{'imagesuffix': self.__schema['imagesuffix']}) else ('\x1B[91m','\x1B[0m') self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imagesuffix',pre,self.__to_string_(value),post),description,0+len(pre)+len(post)) def __ncpu_inp(self): description = '' value = 
self.__ncpu( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'ncpu': value},{'ncpu': self.__schema['ncpu']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('ncpu',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __twidth_inp(self):
        description = ''
        value = self.__twidth( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'twidth': value},{'twidth': self.__schema['twidth']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('twidth',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __doreg_inp(self):
        description = ''
        value = self.__doreg( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'doreg': value},{'doreg': self.__schema['doreg']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('doreg',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
    def __usephacenter_inp(self):
        if self.__usephacenter_dflt( self.__globals_( ) ) is not None:
            description = ''
            value = self.__usephacenter( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'usephacenter': value},{'usephacenter': self.__schema['usephacenter']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('usephacenter',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __reftime_inp(self):
        if self.__reftime_dflt( self.__globals_( ) ) is not None:
            description = ''
            value = self.__reftime( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'reftime': value},{'reftime': self.__schema['reftime']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('reftime',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __toTb_inp(self):
        if self.__toTb_dflt( self.__globals_( ) ) is not None:
            description = ''
            value = self.__toTb( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'toTb': value},{'toTb': self.__schema['toTb']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('toTb',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __sclfactor_inp(self):
        if self.__sclfactor_dflt( self.__globals_( ) ) is not None:
            description = ''
            value = self.__sclfactor( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'sclfactor': value},{'sclfactor': self.__schema['sclfactor']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('sclfactor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __subregion_inp(self):
        if self.__subregion_dflt( self.__globals_( ) ) is not None:
            description = 'The name of a CASA region string'
            value = self.__subregion( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'subregion': value},{'subregion': self.__schema['subregion']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('subregion',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __docompress_inp(self):
        if self.__docompress_dflt( self.__globals_( ) ) is not None:
            description = ''
            value = self.__docompress( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'docompress': value},{'docompress': self.__schema['docompress']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('docompress',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __overwrite_inp(self):
        description = ''
        value = self.__overwrite( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'overwrite': value},{'overwrite': self.__schema['overwrite']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('overwrite',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __selectdata_inp(self):
        description = 'Enable data selection parameters'
        value = self.__selectdata( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'selectdata': value},{'selectdata': self.__schema['selectdata']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('selectdata',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
    def __field_inp(self):
        if self.__field_dflt( self.__globals_( ) ) is not None:
            description = 'field(s) to select'
            value = self.__field( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'field': value},{'field': self.__schema['field']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('field',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __spw_inp(self):
        if self.__spw_dflt( self.__globals_( ) ) is not None:
            description = 'spw(s)/channels to select'
            value = self.__spw( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'spw': value},{'spw': self.__schema['spw']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('spw',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __timerange_inp(self):
        if self.__timerange_dflt( self.__globals_( ) ) is not None:
            description = 'Range of time to select from data'
            value = self.__timerange( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'timerange': value},{'timerange': self.__schema['timerange']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('timerange',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __uvrange_inp(self):
        if self.__uvrange_dflt( self.__globals_( ) ) is not None:
            description = 'Select data within uvrange'
            value = self.__uvrange( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'uvrange': value},{'uvrange': self.__schema['uvrange']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('uvrange',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __antenna_inp(self):
        if self.__antenna_dflt( self.__globals_( ) ) is not None:
            description = 'Select data based on antenna/baseline'
            value = self.__antenna( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'antenna': value},{'antenna': self.__schema['antenna']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('antenna',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __scan_inp(self):
        if self.__scan_dflt( self.__globals_( ) ) is not None:
            description = 'Scan number range'
            value = self.__scan( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'scan': value},{'scan': self.__schema['scan']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('scan',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __observation_inp(self):
        if self.__observation_dflt( self.__globals_( ) ) is not None:
            description = 'Observation ID range'
            value = self.__observation( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'observation': value},{'observation': self.__schema['observation']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('observation',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __intent_inp(self):
        if self.__intent_dflt( self.__globals_( ) ) is not None:
            description = 'Scan Intent(s)'
            value = self.__intent( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'intent': value},{'intent': self.__schema['intent']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('intent',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __datacolumn_inp(self):
        description = 'Data column to image(data,corrected)'
        value = self.__datacolumn( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'datacolumn': value},{'datacolumn': self.__schema['datacolumn']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('datacolumn',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __imagename_inp(self):
        description = 'Pre-name of output images'
        value = self.__imagename( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'imagename': value},{'imagename': self.__schema['imagename']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imagename',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __imsize_inp(self):
        description = 'Number of pixels'
        value = self.__imsize( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'imsize': value},{'imsize': self.__schema['imsize']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('imsize',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __cell_inp(self):
        description = 'Cell size'
        value = self.__cell( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'cell': value},{'cell': self.__schema['cell']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('cell',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __phasecenter_inp(self):
        description = 'Phase center of the image'
        value = self.__phasecenter( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'phasecenter': value},{'phasecenter': self.__schema['phasecenter']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('phasecenter',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __stokes_inp(self):
        description = 'Stokes Planes to make'
        value = self.__stokes( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'stokes': value},{'stokes': self.__schema['stokes']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('stokes',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __projection_inp(self):
        description = 'Coordinate projection'
        value = self.__projection( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'projection': value},{'projection': self.__schema['projection']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('projection',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __startmodel_inp(self):
        description = 'Name of starting model image'
        value = self.__startmodel( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'startmodel': value},{'startmodel': self.__schema['startmodel']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('startmodel',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __specmode_inp(self):
        description = 'Spectral definition mode (mfs,cube,cubedata, cubesource)'
        value = self.__specmode( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'specmode': value},{'specmode': self.__schema['specmode']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('specmode',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
    def __reffreq_inp(self):
        if self.__reffreq_dflt( self.__globals_( ) ) is not None:
            description = 'Reference frequency'
            value = self.__reffreq( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'reffreq': value},{'reffreq': self.__schema['reffreq']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('reffreq',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __nchan_inp(self):
        if self.__nchan_dflt( self.__globals_( ) ) is not None:
            description = 'Number of channels in the output image'
            value = self.__nchan( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'nchan': value},{'nchan': self.__schema['nchan']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('nchan',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __start_inp(self):
        if self.__start_dflt( self.__globals_( ) ) is not None:
            description = 'First channel (e.g. start=3,start=\'1.1GHz\',start=\'15343km/s\')'
            value = self.__start( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'start': value},{'start': self.__schema['start']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('start',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __width_inp(self):
        if self.__width_dflt( self.__globals_( ) ) is not None:
            description = 'Channel width (e.g. width=2,width=\'0.1MHz\',width=\'10km/s\')'
            value = self.__width( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'width': value},{'width': self.__schema['width']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('width',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __outframe_inp(self):
        if self.__outframe_dflt( self.__globals_( ) ) is not None:
            description = 'Spectral reference frame in which to interpret \'start\' and \'width\''
            value = self.__outframe( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'outframe': value},{'outframe': self.__schema['outframe']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('outframe',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __veltype_inp(self):
        if self.__veltype_dflt( self.__globals_( ) ) is not None:
            description = 'Velocity type (radio, z, ratio, beta, gamma, optical)'
            value = self.__veltype( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'veltype': value},{'veltype': self.__schema['veltype']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('veltype',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __restfreq_inp(self):
        if self.__restfreq_dflt( self.__globals_( ) ) is not None:
            description = 'List of rest frequencies'
            value = self.__restfreq( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'restfreq': value},{'restfreq': self.__schema['restfreq']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('restfreq',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __interpolation_inp(self):
        if self.__interpolation_dflt( self.__globals_( ) ) is not None:
            description = 'Spectral interpolation (nearest,linear,cubic)'
            value = self.__interpolation( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'interpolation': value},{'interpolation': self.__schema['interpolation']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('interpolation',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __perchanweightdensity_inp(self):
        if self.__perchanweightdensity_dflt( self.__globals_( ) ) is not None:
            description = 'whether to calculate weight density per channel in Briggs style weighting or not'
            value = self.__perchanweightdensity( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'perchanweightdensity': value},{'perchanweightdensity': self.__schema['perchanweightdensity']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('perchanweightdensity',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __gridder_inp(self):
        description = 'Gridding options (standard, wproject, widefield, mosaic, awproject)'
        value = self.__gridder( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'gridder': value},{'gridder': self.__schema['gridder']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('gridder',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
    def __facets_inp(self):
        if self.__facets_dflt( self.__globals_( ) ) is not None:
            description = 'Number of facets on a side'
            value = self.__facets( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'facets': value},{'facets': self.__schema['facets']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('facets',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
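    # Note on the *_inp helpers (comment added for orientation): each helper
    # renders one row of the inp() listing in one of three styles -- a plain
    # top-level parameter ('%-23.23s = ...', extra width 0), a highlighted
    # expandable parent ('\x1B[1m\x1B[47m...', extra width 13 for the escape
    # codes), or an indented green subparameter ('   \x1B[92m...', extra width 9).
    # When __validate_ rejects the current value, (pre,post) wrap it in red
    # ('\x1B[91m'...'\x1B[0m'). Subparameter helpers do nothing unless the
    # matching *_dflt function returns a non-None default, i.e. unless the
    # current parent-parameter settings actually expose that subparameter.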
    def __psfphasecenter_inp(self):
        if self.__psfphasecenter_dflt( self.__globals_( ) ) is not None:
            description = 'optional direction to calculate psf for mosaic (default is image phasecenter)'
            value = self.__psfphasecenter( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'psfphasecenter': value},{'psfphasecenter': self.__schema['psfphasecenter']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('psfphasecenter',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __wprojplanes_inp(self):
        if self.__wprojplanes_dflt( self.__globals_( ) ) is not None:
            description = 'Number of distinct w-values for convolution functions'
            value = self.__wprojplanes( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'wprojplanes': value},{'wprojplanes': self.__schema['wprojplanes']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('wprojplanes',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __vptable_inp(self):
        if self.__vptable_dflt( self.__globals_( ) ) is not None:
            description = 'Name of Voltage Pattern table'
            value = self.__vptable( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'vptable': value},{'vptable': self.__schema['vptable']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('vptable',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __mosweight_inp(self):
        if self.__mosweight_dflt( self.__globals_( ) ) is not None:
            description = 'Independently weight each field in a mosaic'
            value = self.__mosweight( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'mosweight': value},{'mosweight': self.__schema['mosweight']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('mosweight',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __aterm_inp(self):
        if self.__aterm_dflt( self.__globals_( ) ) is not None:
            description = 'Use aperture illumination functions during gridding'
            value = self.__aterm( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'aterm': value},{'aterm': self.__schema['aterm']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('aterm',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __psterm_inp(self):
        if self.__psterm_dflt( self.__globals_( ) ) is not None:
            description = 'Use prolate spheroidal during gridding'
            value = self.__psterm( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'psterm': value},{'psterm': self.__schema['psterm']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('psterm',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __wbawp_inp(self):
        if self.__wbawp_dflt( self.__globals_( ) ) is not None:
            description = 'Use wideband A-terms'
            value = self.__wbawp( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'wbawp': value},{'wbawp': self.__schema['wbawp']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('wbawp',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __conjbeams_inp(self):
        if self.__conjbeams_dflt( self.__globals_( ) ) is not None:
            description = 'Use conjugate frequency for wideband A-terms'
            value = self.__conjbeams( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'conjbeams': value},{'conjbeams': self.__schema['conjbeams']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('conjbeams',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __cfcache_inp(self):
        if self.__cfcache_dflt( self.__globals_( ) ) is not None:
            description = 'Convolution function cache directory name'
            value = self.__cfcache( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'cfcache': value},{'cfcache': self.__schema['cfcache']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cfcache',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __usepointing_inp(self):
        if self.__usepointing_dflt( self.__globals_( ) ) is not None:
            description = 'The parameter makes the gridder utilize the pointing table phase directions while computing the residual image.'
            value = self.__usepointing( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'usepointing': value},{'usepointing': self.__schema['usepointing']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('usepointing',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __computepastep_inp(self):
        if self.__computepastep_dflt( self.__globals_( ) ) is not None:
            description = 'Parallactic angle interval after the AIFs are recomputed (deg)'
            value = self.__computepastep( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'computepastep': value},{'computepastep': self.__schema['computepastep']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('computepastep',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __rotatepastep_inp(self):
        if self.__rotatepastep_dflt( self.__globals_( ) ) is not None:
            description = 'Parallactic angle interval after which the nearest AIF is rotated (deg)'
            value = self.__rotatepastep( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'rotatepastep': value},{'rotatepastep': self.__schema['rotatepastep']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('rotatepastep',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __pointingoffsetsigdev_inp(self):
        if self.__pointingoffsetsigdev_dflt( self.__globals_( ) ) is not None:
            description = 'Pointing offset threshold to determine heterogeneity of pointing corrections for the AWProject gridder'
            value = self.__pointingoffsetsigdev( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'pointingoffsetsigdev': value},{'pointingoffsetsigdev': self.__schema['pointingoffsetsigdev']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pointingoffsetsigdev',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __pblimit_inp(self):
        if self.__pblimit_dflt( self.__globals_( ) ) is not None:
            description = 'PB gain level at which to cut off normalizations'
            value = self.__pblimit( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'pblimit': value},{'pblimit': self.__schema['pblimit']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pblimit',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __normtype_inp(self):
        if self.__normtype_dflt( self.__globals_( ) ) is not None:
            description = 'Normalization type (flatnoise, flatsky,pbsquare)'
            value = self.__normtype( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'normtype': value},{'normtype': self.__schema['normtype']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('normtype',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __deconvolver_inp(self):
        description = 'Minor cycle algorithm (hogbom,clark,multiscale,mtmfs,mem,clarkstokes)'
        value = self.__deconvolver( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'deconvolver': value},{'deconvolver': self.__schema['deconvolver']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('deconvolver',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
    def __scales_inp(self):
        if self.__scales_dflt( self.__globals_( ) ) is not None:
            description = 'List of scale sizes (in pixels) for multi-scale algorithms'
            value = self.__scales( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'scales': value},{'scales': self.__schema['scales']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('scales',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __nterms_inp(self):
        if self.__nterms_dflt( self.__globals_( ) ) is not None:
            description = 'Number of Taylor coefficients in the spectral model'
            value = self.__nterms( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'nterms': value},{'nterms': self.__schema['nterms']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('nterms',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __smallscalebias_inp(self):
        if self.__smallscalebias_dflt( self.__globals_( ) ) is not None:
            description = 'Biases the scale selection when using multi-scale or mtmfs deconvolvers'
            value = self.__smallscalebias( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'smallscalebias': value},{'smallscalebias': self.__schema['smallscalebias']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('smallscalebias',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __restoration_inp(self):
        description = 'Do restoration steps (or not)'
        value = self.__restoration( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'restoration': value},{'restoration': self.__schema['restoration']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('restoration',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
    def __restoringbeam_inp(self):
        if self.__restoringbeam_dflt( self.__globals_( ) ) is not None:
            description = 'Restoring beam shape to use. Default is the PSF main lobe'
            value = self.__restoringbeam( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'restoringbeam': value},{'restoringbeam': self.__schema['restoringbeam']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('restoringbeam',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __pbcor_inp(self):
        if self.__pbcor_dflt( self.__globals_( ) ) is not None:
            description = 'Apply PB correction on the output restored image'
            value = self.__pbcor( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'pbcor': value},{'pbcor': self.__schema['pbcor']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pbcor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __outlierfile_inp(self):
        description = 'Name of outlier-field image definitions'
        value = self.__outlierfile( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'outlierfile': value},{'outlierfile': self.__schema['outlierfile']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('outlierfile',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __weighting_inp(self):
        description = 'Weighting scheme (natural,uniform,briggs, briggsabs[experimental], briggsbwtaper[experimental])'
        value = self.__weighting( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'weighting': value},{'weighting': self.__schema['weighting']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('weighting',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
    def __robust_inp(self):
        if self.__robust_dflt( self.__globals_( ) ) is not None:
            description = 'Robustness parameter'
            value = self.__robust( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'robust': value},{'robust': self.__schema['robust']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('robust',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __noise_inp(self):
        if self.__noise_dflt( self.__globals_( ) ) is not None:
            description = 'noise parameter for briggs abs mode weighting'
            value = self.__noise( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'noise': value},{'noise': self.__schema['noise']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('noise',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __npixels_inp(self):
        if self.__npixels_dflt( self.__globals_( ) ) is not None:
            description = 'Number of pixels to determine uv-cell size'
            value = self.__npixels( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'npixels': value},{'npixels': self.__schema['npixels']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('npixels',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __uvtaper_inp(self):
        if self.__uvtaper_dflt( self.__globals_( ) ) is not None:
            description = 'uv-taper on outer baselines in uv-plane'
            value = self.__uvtaper( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'uvtaper': value},{'uvtaper': self.__schema['uvtaper']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('uvtaper',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __niter_inp(self):
        description = 'Maximum number of iterations'
        value = self.__niter( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'niter': value},{'niter': self.__schema['niter']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('niter',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
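    # The helpers below render the iteration-control subparameters grouped under
    # the expandable 'niter' row above (gain, threshold, nsigma, cycleniter,
    # cyclefactor, minpsffraction, maxpsffraction, interactive); as with all
    # subparameters, each is shown only while its *_dflt function reports that
    # the parent exposes it.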
    def __gain_inp(self):
        if self.__gain_dflt( self.__globals_( ) ) is not None:
            description = 'Loop gain'
            value = self.__gain( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'gain': value},{'gain': self.__schema['gain']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('gain',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __threshold_inp(self):
        if self.__threshold_dflt( self.__globals_( ) ) is not None:
            description = 'Stopping threshold'
            value = self.__threshold( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'threshold': value},{'threshold': self.__schema['threshold']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('threshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __nsigma_inp(self):
        if self.__nsigma_dflt( self.__globals_( ) ) is not None:
            description = 'Multiplicative factor for rms-based threshold stopping'
            value = self.__nsigma( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'nsigma': value},{'nsigma': self.__schema['nsigma']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('nsigma',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __cycleniter_inp(self):
        if self.__cycleniter_dflt( self.__globals_( ) ) is not None:
            description = 'Maximum number of minor-cycle iterations'
            value = self.__cycleniter( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'cycleniter': value},{'cycleniter': self.__schema['cycleniter']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cycleniter',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __cyclefactor_inp(self):
        if self.__cyclefactor_dflt( self.__globals_( ) ) is not None:
            description = 'Scaling on PSF sidelobe level to compute the minor-cycle stopping threshold.'
            value = self.__cyclefactor( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'cyclefactor': value},{'cyclefactor': self.__schema['cyclefactor']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cyclefactor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __minpsffraction_inp(self):
        if self.__minpsffraction_dflt( self.__globals_( ) ) is not None:
            description = 'PSF fraction that marks the max depth of cleaning in the minor cycle'
            value = self.__minpsffraction( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'minpsffraction': value},{'minpsffraction': self.__schema['minpsffraction']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('minpsffraction',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __maxpsffraction_inp(self):
        if self.__maxpsffraction_dflt( self.__globals_( ) ) is not None:
            description = 'PSF fraction that marks the minimum depth of cleaning in the minor cycle'
            value = self.__maxpsffraction( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'maxpsffraction': value},{'maxpsffraction': self.__schema['maxpsffraction']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('maxpsffraction',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __interactive_inp(self):
        if self.__interactive_dflt( self.__globals_( ) ) is not None:
            description = 'Modify masks and parameters at runtime'
            value = self.__interactive( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'interactive': value},{'interactive': self.__schema['interactive']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('interactive',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __usemask_inp(self):
        description = 'Type of mask(s) for deconvolution: user, pb, or auto-multithresh'
        value = self.__usemask( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'usemask': value},{'usemask': self.__schema['usemask']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('usemask',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
    def __mask_inp(self):
        if self.__mask_dflt( self.__globals_( ) ) is not None:
            description = 'Mask (a list of image name(s) or region file(s) or region string(s) )'
            value = self.__mask( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'mask': value},{'mask': self.__schema['mask']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('mask',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __pbmask_inp(self):
        if self.__pbmask_dflt( self.__globals_( ) ) is not None:
            description = 'primary beam mask'
            value = self.__pbmask( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'pbmask': value},{'pbmask': self.__schema['pbmask']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('pbmask',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __sidelobethreshold_inp(self):
        if self.__sidelobethreshold_dflt( self.__globals_( ) ) is not None:
            description = 'sidelobethreshold * the max sidelobe level * peak residual'
            value = self.__sidelobethreshold( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'sidelobethreshold': value},{'sidelobethreshold': self.__schema['sidelobethreshold']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('sidelobethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __noisethreshold_inp(self):
        if self.__noisethreshold_dflt( self.__globals_( ) ) is not None:
            description = 'noisethreshold * rms in residual image + location(median)'
            value = self.__noisethreshold( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'noisethreshold': value},{'noisethreshold': self.__schema['noisethreshold']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('noisethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __lownoisethreshold_inp(self):
        if self.__lownoisethreshold_dflt( self.__globals_( ) ) is not None:
            description = 'lownoisethreshold * rms in residual image + location(median)'
            value = self.__lownoisethreshold( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'lownoisethreshold': value},{'lownoisethreshold': self.__schema['lownoisethreshold']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('lownoisethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __negativethreshold_inp(self):
        if self.__negativethreshold_dflt( self.__globals_( ) ) is not None:
            description = 'negativethreshold * rms in residual image + location(median)'
            value = self.__negativethreshold( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'negativethreshold': value},{'negativethreshold': self.__schema['negativethreshold']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('negativethreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __smoothfactor_inp(self):
        if self.__smoothfactor_dflt( self.__globals_( ) ) is not None:
            description = 'smoothing factor in a unit of the beam'
            value = self.__smoothfactor( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'smoothfactor': value},{'smoothfactor': self.__schema['smoothfactor']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('smoothfactor',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __minbeamfrac_inp(self):
        if self.__minbeamfrac_dflt( self.__globals_( ) ) is not None:
            description = 'minimum beam fraction for pruning'
            value = self.__minbeamfrac( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'minbeamfrac': value},{'minbeamfrac': self.__schema['minbeamfrac']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('minbeamfrac',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __cutthreshold_inp(self):
        if self.__cutthreshold_dflt( self.__globals_( ) ) is not None:
            description = 'threshold to cut the smoothed mask to create a final mask'
            value = self.__cutthreshold( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'cutthreshold': value},{'cutthreshold': self.__schema['cutthreshold']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('cutthreshold',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __growiterations_inp(self):
        if self.__growiterations_dflt( self.__globals_( ) ) is not None:
            description = 'number of binary dilation iterations for growing the mask'
            value = self.__growiterations( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'growiterations': value},{'growiterations': self.__schema['growiterations']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('growiterations',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __dogrowprune_inp(self):
        if self.__dogrowprune_dflt( self.__globals_( ) ) is not None:
            description = 'Do pruning on the grow mask'
            value = self.__dogrowprune( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'dogrowprune': value},{'dogrowprune': self.__schema['dogrowprune']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('dogrowprune',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __minpercentchange_inp(self):
        if self.__minpercentchange_dflt( self.__globals_( ) ) is not None:
            description = 'minimum percentage change in mask size (per channel plane) to trigger updating of mask by automask'
            value = self.__minpercentchange( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'minpercentchange': value},{'minpercentchange': self.__schema['minpercentchange']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('minpercentchange',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __verbose_inp(self):
        if self.__verbose_dflt( self.__globals_( ) ) is not None:
            description = 'True: print more automasking information in the logger'
            value = self.__verbose( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'verbose': value},{'verbose': self.__schema['verbose']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('verbose',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __fastnoise_inp(self):
        description = 'True: use the faster (old) noise calculation. False: use the new improved noise calculations'
        value = self.__fastnoise( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'fastnoise': value},{'fastnoise': self.__schema['fastnoise']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('fastnoise',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __restart_inp(self):
        description = 'True : Re-use existing images. False : Increment imagename'
        value = self.__restart( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'restart': value},{'restart': self.__schema['restart']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('restart',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __savemodel_inp(self):
        description = 'Options to save model visibilities (none, virtual, modelcolumn)'
        value = self.__savemodel( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'savemodel': value},{'savemodel': self.__schema['savemodel']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('savemodel',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __calcres_inp(self):
        description = 'Calculate initial residual image'
        value = self.__calcres( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'calcres': value},{'calcres': self.__schema['calcres']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('calcres',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))
    def __calcpsf_inp(self):
        description = 'Calculate PSF'
        value = self.__calcpsf( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'calcpsf': value},{'calcpsf': self.__schema['calcpsf']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('\x1B[1m\x1B[47m%-23.23s =\x1B[0m %s%-23s%s' % ('calcpsf',pre,self.__to_string_(value),post),description,13+len(pre)+len(post))
    def __psfcutoff_inp(self):
        if self.__psfcutoff_dflt( self.__globals_( ) ) is not None:
            description = 'All pixels in the main lobe of the PSF above psfcutoff are used to fit a Gaussian beam (the Clean beam).'
            value = self.__psfcutoff( self.__globals_( ) )
            (pre,post) = ('','') if self.__validate_({'psfcutoff': value},{'psfcutoff': self.__schema['psfcutoff']}) else ('\x1B[91m','\x1B[0m')
            self.__do_inp_output('   \x1B[92m%-20.20s =\x1B[0m %s%-23s%s' % ('psfcutoff',pre,self.__to_string_(value),post),description,9+len(pre)+len(post))
    def __parallel_inp(self):
        description = 'Run major cycles in parallel'
        value = self.__parallel( self.__globals_( ) )
        (pre,post) = ('','') if self.__validate_({'parallel': value},{'parallel': self.__schema['parallel']}) else ('\x1B[91m','\x1B[0m')
        self.__do_inp_output('%-23.23s = %s%-23s%s' % ('parallel',pre,self.__to_string_(value),post),description,0+len(pre)+len(post))

    #--------- global default implementation-------------------------------------------
    @static_var('state', __sf__('casa_inp_go_state'))
    def set_global_defaults(self):
        self.set_global_defaults.state['last'] = self
        glb = self.__globals_( )
        if 'antenna' in glb: del glb['antenna']
        if 'smoothfactor' in glb: del glb['smoothfactor']
        if 'stokes' in glb: del glb['stokes']
        if 'negativethreshold' in glb: del glb['negativethreshold']
        if 'deconvolver' in glb: del glb['deconvolver']
        if 'minbeamfrac' in glb: del glb['minbeamfrac']
        if 'doreg' in glb: del glb['doreg']
        if 'savemodel' in glb: del glb['savemodel']
        if 'psfphasecenter' in glb: del glb['psfphasecenter']
        if 'mask' in glb: del glb['mask']
        if 'sclfactor' in glb: del glb['sclfactor']
        if 'field' in glb: del glb['field']
        if 'cutthreshold' in glb: del glb['cutthreshold']
        if 'projection' in glb: del glb['projection']
        if 'pblimit' in glb: del glb['pblimit']
        if 'smallscalebias' in glb: del glb['smallscalebias']
        if 'maxpsffraction' in glb: del glb['maxpsffraction']
        if 'datacolumn' in glb: del glb['datacolumn']
        if 'verbose' in glb: del glb['verbose']
        if 'weighting' in glb: del glb['weighting']
        if 'intent' in glb: del glb['intent']
        if 'noise' in glb: del glb['noise']
        if 'interpolation' in glb: del glb['interpolation']
        if 'subregion' in glb: del glb['subregion']
        if 'nterms' in glb: del glb['nterms']
        if 'pointingoffsetsigdev' in glb: del glb['pointingoffsetsigdev']
        if 'nchan' in glb: del glb['nchan']
        if 'reffreq' in glb: del glb['reffreq']
        if 'conjbeams' in glb: del glb['conjbeams']
        if 'restoringbeam' in glb: del glb['restoringbeam']
        if 'sidelobethreshold' in glb: del glb['sidelobethreshold']
        if 'reftime' in glb: del glb['reftime']
        if 'gridder' in glb: del glb['gridder']
        if 'cycleniter' in glb: del glb['cycleniter']
        if 'imagename' in glb: del glb['imagename']
        if 'minpsffraction' in glb: del glb['minpsffraction']
        if 'imsize' in glb: del glb['imsize']
        if 'scan' in glb: del glb['scan']
        if 'vis' in glb: del glb['vis']
        if 'outlierfile' in glb: del glb['outlierfile']
        if 'computepastep' in glb: del glb['computepastep']
        if 'minpercentchange' in glb: del glb['minpercentchange']
        if 'fastnoise' in glb: del glb['fastnoise']
        if 'wbawp' in glb: del glb['wbawp']
        if 'docompress' in glb: del glb['docompress']
        if 'interactive' in glb: del glb['interactive']
        if 'specmode' in glb: del glb['specmode']
        if 'npixels' in glb: del glb['npixels']
        if 'mosweight' in glb: del glb['mosweight']
        if 'pbcor' in glb: del glb['pbcor']
        if 'calcres' in glb: del glb['calcres']
        if 'normtype' in glb: del glb['normtype']
        if 'uvtaper' in glb: del glb['uvtaper']
        if 'cyclefactor' in glb: del glb['cyclefactor']
        if 'toTb' in glb: del glb['toTb']
        if 'restfreq' in glb: del glb['restfreq']
        if 'imageprefix' in glb: del glb['imageprefix']
        if 'pbmask' in glb: del glb['pbmask']
        if 'growiterations' in glb: del glb['growiterations']
        if 'gain' in glb: del glb['gain']
        if 'scales' in glb: del glb['scales']
        if 'twidth' in glb: del glb['twidth']
        if 'psfcutoff' in glb: del glb['psfcutoff']
        if 'robust' in glb: del glb['robust']
        if 'vptable' in glb: del glb['vptable']
        if 'perchanweightdensity' in glb: del glb['perchanweightdensity']
        if 'aterm' in glb: del glb['aterm']
        if 'imagesuffix' in glb: del glb['imagesuffix']
        if 'usephacenter' in glb: del glb['usephacenter']
        if 'usepointing' in glb: del glb['usepointing']
        if 'rotatepastep' in glb: del glb['rotatepastep']
        if 'threshold' in glb: del glb['threshold']
        if 'ncpu' in glb: del glb['ncpu']
        if 'veltype' in glb: del glb['veltype']
        if 'calcpsf' in glb: del glb['calcpsf']
        if 'usemask' in glb: del glb['usemask']
        if 'restoration' in glb: del glb['restoration']
        if 'niter' in glb: del glb['niter']
        if 'outframe' in glb: del glb['outframe']
        if 'dogrowprune' in glb: del glb['dogrowprune']
        if 'cell' in glb: del glb['cell']
        if 'uvrange' in glb: del glb['uvrange']
        if 'psterm' in glb: del glb['psterm']
        if 'phasecenter' in glb: del glb['phasecenter']
        if 'overwrite' in glb: del glb['overwrite']
        if 'restart' in glb: del glb['restart']
        if 'start' in glb: del glb['start']
        if 'observation' in glb: del glb['observation']
        if 'lownoisethreshold' in glb: del glb['lownoisethreshold']
        if 'facets' in glb: del glb['facets']
        if 'noisethreshold' in glb: del glb['noisethreshold']
        if 'width' in glb: del glb['width']
        if 'spw' in glb: del glb['spw']
        if 'selectdata' in glb: del glb['selectdata']
        if 'timerange' in glb: del glb['timerange']
        if 'parallel' in glb: del glb['parallel']
        if 'nsigma' in glb: del glb['nsigma']
        if 'cfcache' in glb: del glb['cfcache']
        if 'wprojplanes' in glb: del glb['wprojplanes']
        if 'startmodel' in glb: del glb['startmodel']

    #--------- inp function -----------------------------------------------------------
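    # inp() prints the task header and then every parameter row in interface
    # order; the terminal size probed below is presumably what __do_inp_output
    # uses to lay out the description column.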
print("# ptclean6 -- %s" % self._info_desc_) self.term_width, self.term_height = shutil.get_terminal_size(fallback=(80, 24)) self.__vis_inp( ) self.__imageprefix_inp( ) self.__imagesuffix_inp( ) self.__ncpu_inp( ) self.__twidth_inp( ) self.__doreg_inp( ) self.__usephacenter_inp( ) self.__reftime_inp( ) self.__toTb_inp( ) self.__sclfactor_inp( ) self.__subregion_inp( ) self.__docompress_inp( ) self.__overwrite_inp( ) self.__selectdata_inp( ) self.__field_inp( ) self.__spw_inp( ) self.__timerange_inp( ) self.__uvrange_inp( ) self.__antenna_inp( ) self.__scan_inp( ) self.__observation_inp( ) self.__intent_inp( ) self.__datacolumn_inp( ) self.__imagename_inp( ) self.__imsize_inp( ) self.__cell_inp( ) self.__phasecenter_inp( ) self.__stokes_inp( ) self.__projection_inp( ) self.__startmodel_inp( ) self.__specmode_inp( ) self.__reffreq_inp( ) self.__nchan_inp( ) self.__start_inp( ) self.__width_inp( ) self.__outframe_inp( ) self.__veltype_inp( ) self.__restfreq_inp( ) self.__interpolation_inp( ) self.__perchanweightdensity_inp( ) self.__gridder_inp( ) self.__facets_inp( ) self.__psfphasecenter_inp( ) self.__wprojplanes_inp( ) self.__vptable_inp( ) self.__mosweight_inp( ) self.__aterm_inp( ) self.__psterm_inp( ) self.__wbawp_inp( ) self.__conjbeams_inp( ) self.__cfcache_inp( ) self.__usepointing_inp( ) self.__computepastep_inp( ) self.__rotatepastep_inp( ) self.__pointingoffsetsigdev_inp( ) self.__pblimit_inp( ) self.__normtype_inp( ) self.__deconvolver_inp( ) self.__scales_inp( ) self.__nterms_inp( ) self.__smallscalebias_inp( ) self.__restoration_inp( ) self.__restoringbeam_inp( ) self.__pbcor_inp( ) self.__outlierfile_inp( ) self.__weighting_inp( ) self.__robust_inp( ) self.__noise_inp( ) self.__npixels_inp( ) self.__uvtaper_inp( ) self.__niter_inp( ) self.__gain_inp( ) self.__threshold_inp( ) self.__nsigma_inp( ) self.__cycleniter_inp( ) self.__cyclefactor_inp( ) self.__minpsffraction_inp( ) self.__maxpsffraction_inp( ) self.__interactive_inp( ) self.__usemask_inp( ) self.__mask_inp( ) self.__pbmask_inp( ) self.__sidelobethreshold_inp( ) self.__noisethreshold_inp( ) self.__lownoisethreshold_inp( ) self.__negativethreshold_inp( ) self.__smoothfactor_inp( ) self.__minbeamfrac_inp( ) self.__cutthreshold_inp( ) self.__growiterations_inp( ) self.__dogrowprune_inp( ) self.__minpercentchange_inp( ) self.__verbose_inp( ) self.__fastnoise_inp( ) self.__restart_inp( ) self.__savemodel_inp( ) self.__calcres_inp( ) self.__calcpsf_inp( ) self.__psfcutoff_inp( ) self.__parallel_inp( ) #--------- tget function ---------------------------------------------------------- @static_var('state', __sf__('casa_inp_go_state')) def tget(self,file=None): from casashell.private.stack_manip import find_frame from runpy import run_path filename = None if file is None: if os.path.isfile("ptclean6.last"): filename = "ptclean6.last" elif isinstance(file, str): if os.path.isfile(file): filename = file if filename is not None: glob = find_frame( ) newglob = run_path( filename, init_globals={ } ) for i in newglob: glob[i] = newglob[i] self.tget.state['last'] = self else: print("could not find last file, setting defaults instead...") self.set_global_defaults( ) def __call__( self, vis=None, imageprefix=None, imagesuffix=None, ncpu=None, twidth=None, doreg=None, usephacenter=None, reftime=None, toTb=None, sclfactor=None, subregion=None, docompress=None, overwrite=None, selectdata=None, field=None, spw=None, timerange=None, uvrange=None, antenna=None, scan=None, observation=None, intent=None, datacolumn=None, imagename=None, 
                  imsize=None, cell=None, phasecenter=None, stokes=None, projection=None, startmodel=None, specmode=None, reffreq=None, nchan=None, start=None, width=None, outframe=None, veltype=None, restfreq=None, interpolation=None, perchanweightdensity=None, gridder=None, facets=None, psfphasecenter=None, wprojplanes=None, vptable=None, mosweight=None, aterm=None, psterm=None, wbawp=None, conjbeams=None, cfcache=None, usepointing=None, computepastep=None, rotatepastep=None, pointingoffsetsigdev=None, pblimit=None, normtype=None, deconvolver=None, scales=None, nterms=None, smallscalebias=None, restoration=None, restoringbeam=None, pbcor=None, outlierfile=None, weighting=None, robust=None, noise=None, npixels=None, uvtaper=None, niter=None, gain=None, threshold=None, nsigma=None, cycleniter=None, cyclefactor=None, minpsffraction=None, maxpsffraction=None, interactive=None, usemask=None, mask=None, pbmask=None, sidelobethreshold=None, noisethreshold=None, lownoisethreshold=None, negativethreshold=None, smoothfactor=None, minbeamfrac=None, cutthreshold=None, growiterations=None, dogrowprune=None, minpercentchange=None, verbose=None, fastnoise=None, restart=None, savemodel=None, calcres=None, calcpsf=None, psfcutoff=None, parallel=None ):
        def noobj(s):
            if s.startswith('<') and s.endswith('>'):
                return "None"
            else:
                return s
        _prefile = os.path.realpath('ptclean6.pre')
        _postfile = os.path.realpath('ptclean6.last')
        _return_result_ = None
        _arguments = [vis,imageprefix,imagesuffix,ncpu,twidth,doreg,usephacenter,reftime,toTb,sclfactor,subregion,docompress,overwrite,selectdata,field,spw,timerange,uvrange,antenna,scan,observation,intent,datacolumn,imagename,imsize,cell,phasecenter,stokes,projection,startmodel,specmode,reffreq,nchan,start,width,outframe,veltype,restfreq,interpolation,perchanweightdensity,gridder,facets,psfphasecenter,wprojplanes,vptable,mosweight,aterm,psterm,wbawp,conjbeams,cfcache,usepointing,computepastep,rotatepastep,pointingoffsetsigdev,pblimit,normtype,deconvolver,scales,nterms,smallscalebias,restoration,restoringbeam,pbcor,outlierfile,weighting,robust,noise,npixels,uvtaper,niter,gain,threshold,nsigma,cycleniter,cyclefactor,minpsffraction,maxpsffraction,interactive,usemask,mask,pbmask,sidelobethreshold,noisethreshold,lownoisethreshold,negativethreshold,smoothfactor,minbeamfrac,cutthreshold,growiterations,dogrowprune,minpercentchange,verbose,fastnoise,restart,savemodel,calcres,calcpsf,psfcutoff,parallel]
        _invocation_parameters = OrderedDict( )
        if any(map(lambda x: x is not None,_arguments)):
            # invoke python style
            # set the non sub-parameters that are not None
            local_global = { }
            if vis is not None: local_global['vis'] = vis
            if imageprefix is not None: local_global['imageprefix'] = imageprefix
            if imagesuffix is not None: local_global['imagesuffix'] = imagesuffix
            if ncpu is not None: local_global['ncpu'] = ncpu
            if twidth is not None: local_global['twidth'] = twidth
            if doreg is not None: local_global['doreg'] = doreg
            if overwrite is not None: local_global['overwrite'] = overwrite
            if selectdata is not None: local_global['selectdata'] = selectdata
            if datacolumn is not None: local_global['datacolumn'] = datacolumn
            if imagename is not None: local_global['imagename'] = imagename
            if imsize is not None: local_global['imsize'] = imsize
            if cell is not None: local_global['cell'] = cell
            if phasecenter is not None: local_global['phasecenter'] = phasecenter
            if stokes is not None: local_global['stokes'] = stokes
            if projection is not None: local_global['projection'] = projection
            if startmodel is not None: local_global['startmodel'] = startmodel
            if specmode is not None: local_global['specmode'] = specmode
            if gridder is not None: local_global['gridder'] = gridder
            if deconvolver is not None: local_global['deconvolver'] = deconvolver
            if restoration is not None: local_global['restoration'] = restoration
            if outlierfile is not None: local_global['outlierfile'] = outlierfile
            if weighting is not None: local_global['weighting'] = weighting
            if niter is not None: local_global['niter'] = niter
            if usemask is not None: local_global['usemask'] = usemask
            if fastnoise is not None: local_global['fastnoise'] = fastnoise
            if restart is not None: local_global['restart'] = restart
            if savemodel is not None: local_global['savemodel'] = savemodel
            if calcres is not None: local_global['calcres'] = calcres
            if calcpsf is not None: local_global['calcpsf'] = calcpsf
            if parallel is not None: local_global['parallel'] = parallel
            # the invocation parameters for the non-subparameters can now be set - this picks up those defaults
            _invocation_parameters['vis'] = self.__vis( local_global )
            _invocation_parameters['imageprefix'] = self.__imageprefix( local_global )
            _invocation_parameters['imagesuffix'] = self.__imagesuffix( local_global )
            _invocation_parameters['ncpu'] = self.__ncpu( local_global )
            _invocation_parameters['twidth'] = self.__twidth( local_global )
            _invocation_parameters['doreg'] = self.__doreg( local_global )
            _invocation_parameters['overwrite'] = self.__overwrite( local_global )
            _invocation_parameters['selectdata'] = self.__selectdata( local_global )
            _invocation_parameters['datacolumn'] = self.__datacolumn( local_global )
            _invocation_parameters['imagename'] = self.__imagename( local_global )
            _invocation_parameters['imsize'] = self.__imsize( local_global )
            _invocation_parameters['cell'] = self.__cell( local_global )
            _invocation_parameters['phasecenter'] = self.__phasecenter( local_global )
            _invocation_parameters['stokes'] = self.__stokes( local_global )
            _invocation_parameters['projection'] = self.__projection( local_global )
            _invocation_parameters['startmodel'] = self.__startmodel( local_global )
            _invocation_parameters['specmode'] = self.__specmode( local_global )
            _invocation_parameters['gridder'] = self.__gridder( local_global )
            _invocation_parameters['deconvolver'] = self.__deconvolver( local_global )
            _invocation_parameters['restoration'] = self.__restoration( local_global )
            _invocation_parameters['outlierfile'] = self.__outlierfile( local_global )
            _invocation_parameters['weighting'] = self.__weighting( local_global )
            _invocation_parameters['niter'] = self.__niter( local_global )
            _invocation_parameters['usemask'] = self.__usemask( local_global )
            _invocation_parameters['fastnoise'] = self.__fastnoise( local_global )
            _invocation_parameters['restart'] = self.__restart( local_global )
            _invocation_parameters['savemodel'] = self.__savemodel( local_global )
            _invocation_parameters['calcres'] = self.__calcres( local_global )
            _invocation_parameters['calcpsf'] = self.__calcpsf( local_global )
            _invocation_parameters['parallel'] = self.__parallel( local_global )
            # the sub-parameters can then be set.  Use the supplied value if not None, else the function, which gets the appropriate default
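            # Illustrative example of this two-phase resolution: a call such as
            # task(vis='in.ms', gridder='mosaic') (hypothetical values) first
            # fills in every remaining top-level parameter from its default,
            # then resolves each subparameter below against that completed
            # _invocation_parameters dict -- so a subparameter default (e.g.
            # mosweight) can depend on the gridder that was actually chosen.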
            _invocation_parameters['usephacenter'] = self.__usephacenter( _invocation_parameters ) if usephacenter is None else usephacenter
            _invocation_parameters['reftime'] = self.__reftime( _invocation_parameters ) if reftime is None else reftime
            _invocation_parameters['toTb'] = self.__toTb( _invocation_parameters ) if toTb is None else toTb
            _invocation_parameters['sclfactor'] = self.__sclfactor( _invocation_parameters ) if sclfactor is None else sclfactor
            _invocation_parameters['subregion'] = self.__subregion( _invocation_parameters ) if subregion is None else subregion
            _invocation_parameters['docompress'] = self.__docompress( _invocation_parameters ) if docompress is None else docompress
            _invocation_parameters['field'] = self.__field( _invocation_parameters ) if field is None else field
            _invocation_parameters['spw'] = self.__spw( _invocation_parameters ) if spw is None else spw
            _invocation_parameters['timerange'] = self.__timerange( _invocation_parameters ) if timerange is None else timerange
            _invocation_parameters['uvrange'] = self.__uvrange( _invocation_parameters ) if uvrange is None else uvrange
            _invocation_parameters['antenna'] = self.__antenna( _invocation_parameters ) if antenna is None else antenna
            _invocation_parameters['scan'] = self.__scan( _invocation_parameters ) if scan is None else scan
            _invocation_parameters['observation'] = self.__observation( _invocation_parameters ) if observation is None else observation
            _invocation_parameters['intent'] = self.__intent( _invocation_parameters ) if intent is None else intent
            _invocation_parameters['reffreq'] = self.__reffreq( _invocation_parameters ) if reffreq is None else reffreq
            _invocation_parameters['nchan'] = self.__nchan( _invocation_parameters ) if nchan is None else nchan
            _invocation_parameters['start'] = self.__start( _invocation_parameters ) if start is None else start
            _invocation_parameters['width'] = self.__width( _invocation_parameters ) if width is None else width
            _invocation_parameters['outframe'] = self.__outframe( _invocation_parameters ) if outframe is None else outframe
            _invocation_parameters['veltype'] = self.__veltype( _invocation_parameters ) if veltype is None else veltype
            _invocation_parameters['restfreq'] = self.__restfreq( _invocation_parameters ) if restfreq is None else restfreq
            _invocation_parameters['interpolation'] = self.__interpolation( _invocation_parameters ) if interpolation is None else interpolation
            _invocation_parameters['perchanweightdensity'] = self.__perchanweightdensity( _invocation_parameters ) if perchanweightdensity is None else perchanweightdensity
            _invocation_parameters['facets'] = self.__facets( _invocation_parameters ) if facets is None else facets
            _invocation_parameters['psfphasecenter'] = self.__psfphasecenter( _invocation_parameters ) if psfphasecenter is None else psfphasecenter
            _invocation_parameters['wprojplanes'] = self.__wprojplanes( _invocation_parameters ) if wprojplanes is None else wprojplanes
            _invocation_parameters['vptable'] = self.__vptable( _invocation_parameters ) if vptable is None else vptable
            _invocation_parameters['mosweight'] = self.__mosweight( _invocation_parameters ) if mosweight is None else mosweight
            _invocation_parameters['aterm'] = self.__aterm( _invocation_parameters ) if aterm is None else aterm
            _invocation_parameters['psterm'] = self.__psterm( _invocation_parameters ) if psterm is None else psterm
            _invocation_parameters['wbawp'] = self.__wbawp( _invocation_parameters ) if wbawp is None else wbawp
            _invocation_parameters['conjbeams'] = self.__conjbeams( _invocation_parameters ) if conjbeams is None else conjbeams
            _invocation_parameters['cfcache'] = self.__cfcache( _invocation_parameters ) if cfcache is None else cfcache
            _invocation_parameters['usepointing'] = self.__usepointing( _invocation_parameters ) if usepointing is None else usepointing
            _invocation_parameters['computepastep'] = self.__computepastep( _invocation_parameters ) if computepastep is None else computepastep
            _invocation_parameters['rotatepastep'] = self.__rotatepastep( _invocation_parameters ) if rotatepastep is None else rotatepastep
            _invocation_parameters['pointingoffsetsigdev'] = self.__pointingoffsetsigdev( _invocation_parameters ) if pointingoffsetsigdev is None else pointingoffsetsigdev
            _invocation_parameters['pblimit'] = self.__pblimit( _invocation_parameters ) if pblimit is None else pblimit
            _invocation_parameters['normtype'] = self.__normtype( _invocation_parameters ) if normtype is None else normtype
            _invocation_parameters['scales'] = self.__scales( _invocation_parameters ) if scales is None else scales
            _invocation_parameters['nterms'] = self.__nterms( _invocation_parameters ) if nterms is None else nterms
            _invocation_parameters['smallscalebias'] = self.__smallscalebias( _invocation_parameters ) if smallscalebias is None else smallscalebias
            _invocation_parameters['restoringbeam'] = self.__restoringbeam( _invocation_parameters ) if restoringbeam is None else restoringbeam
            _invocation_parameters['pbcor'] = self.__pbcor( _invocation_parameters ) if pbcor is None else pbcor
            _invocation_parameters['robust'] = self.__robust( _invocation_parameters ) if robust is None else robust
            _invocation_parameters['noise'] = self.__noise( _invocation_parameters ) if noise is None else noise
            _invocation_parameters['npixels'] = self.__npixels( _invocation_parameters ) if npixels is None else npixels
            _invocation_parameters['uvtaper'] = self.__uvtaper( _invocation_parameters ) if uvtaper is None else uvtaper
            _invocation_parameters['gain'] = self.__gain( _invocation_parameters ) if gain is None else gain
            _invocation_parameters['threshold'] = self.__threshold( _invocation_parameters ) if threshold is None else threshold
            _invocation_parameters['nsigma'] = self.__nsigma( _invocation_parameters ) if nsigma is None else nsigma
            _invocation_parameters['cycleniter'] = self.__cycleniter( _invocation_parameters ) if cycleniter is None else cycleniter
            _invocation_parameters['cyclefactor'] = self.__cyclefactor( _invocation_parameters ) if cyclefactor is None else cyclefactor
            _invocation_parameters['minpsffraction'] = self.__minpsffraction( _invocation_parameters ) if minpsffraction is None else minpsffraction
            _invocation_parameters['maxpsffraction'] = self.__maxpsffraction( _invocation_parameters ) if maxpsffraction is None else maxpsffraction
            _invocation_parameters['interactive'] = self.__interactive( _invocation_parameters ) if interactive is None else interactive
            _invocation_parameters['mask'] = self.__mask( _invocation_parameters ) if mask is None else mask
            _invocation_parameters['pbmask'] = self.__pbmask( _invocation_parameters ) if pbmask is None else pbmask
            _invocation_parameters['sidelobethreshold'] = self.__sidelobethreshold( _invocation_parameters ) if sidelobethreshold is None else sidelobethreshold
            _invocation_parameters['noisethreshold'] = self.__noisethreshold( _invocation_parameters ) if noisethreshold is None else noisethreshold
            _invocation_parameters['lownoisethreshold'] = self.__lownoisethreshold( _invocation_parameters ) if lownoisethreshold is None else lownoisethreshold
_invocation_parameters ) if lownoisethreshold is None else lownoisethreshold _invocation_parameters['negativethreshold'] = self.__negativethreshold( _invocation_parameters ) if negativethreshold is None else negativethreshold _invocation_parameters['smoothfactor'] = self.__smoothfactor( _invocation_parameters ) if smoothfactor is None else smoothfactor _invocation_parameters['minbeamfrac'] = self.__minbeamfrac( _invocation_parameters ) if minbeamfrac is None else minbeamfrac _invocation_parameters['cutthreshold'] = self.__cutthreshold( _invocation_parameters ) if cutthreshold is None else cutthreshold _invocation_parameters['growiterations'] = self.__growiterations( _invocation_parameters ) if growiterations is None else growiterations _invocation_parameters['dogrowprune'] = self.__dogrowprune( _invocation_parameters ) if dogrowprune is None else dogrowprune _invocation_parameters['minpercentchange'] = self.__minpercentchange( _invocation_parameters ) if minpercentchange is None else minpercentchange _invocation_parameters['verbose'] = self.__verbose( _invocation_parameters ) if verbose is None else verbose _invocation_parameters['psfcutoff'] = self.__psfcutoff( _invocation_parameters ) if psfcutoff is None else psfcutoff else: # invoke with inp/go semantics _invocation_parameters['vis'] = self.__vis( self.__globals_( ) ) _invocation_parameters['imageprefix'] = self.__imageprefix( self.__globals_( ) ) _invocation_parameters['imagesuffix'] = self.__imagesuffix( self.__globals_( ) ) _invocation_parameters['ncpu'] = self.__ncpu( self.__globals_( ) ) _invocation_parameters['twidth'] = self.__twidth( self.__globals_( ) ) _invocation_parameters['doreg'] = self.__doreg( self.__globals_( ) ) _invocation_parameters['usephacenter'] = self.__usephacenter( self.__globals_( ) ) _invocation_parameters['reftime'] = self.__reftime( self.__globals_( ) ) _invocation_parameters['toTb'] = self.__toTb( self.__globals_( ) ) _invocation_parameters['sclfactor'] = self.__sclfactor( self.__globals_( ) ) _invocation_parameters['subregion'] = self.__subregion( self.__globals_( ) ) _invocation_parameters['docompress'] = self.__docompress( self.__globals_( ) ) _invocation_parameters['overwrite'] = self.__overwrite( self.__globals_( ) ) _invocation_parameters['selectdata'] = self.__selectdata( self.__globals_( ) ) _invocation_parameters['field'] = self.__field( self.__globals_( ) ) _invocation_parameters['spw'] = self.__spw( self.__globals_( ) ) _invocation_parameters['timerange'] = self.__timerange( self.__globals_( ) ) _invocation_parameters['uvrange'] = self.__uvrange( self.__globals_( ) ) _invocation_parameters['antenna'] = self.__antenna( self.__globals_( ) ) _invocation_parameters['scan'] = self.__scan( self.__globals_( ) ) _invocation_parameters['observation'] = self.__observation( self.__globals_( ) ) _invocation_parameters['intent'] = self.__intent( self.__globals_( ) ) _invocation_parameters['datacolumn'] = self.__datacolumn( self.__globals_( ) ) _invocation_parameters['imagename'] = self.__imagename( self.__globals_( ) ) _invocation_parameters['imsize'] = self.__imsize( self.__globals_( ) ) _invocation_parameters['cell'] = self.__cell( self.__globals_( ) ) _invocation_parameters['phasecenter'] = self.__phasecenter( self.__globals_( ) ) _invocation_parameters['stokes'] = self.__stokes( self.__globals_( ) ) _invocation_parameters['projection'] = self.__projection( self.__globals_( ) ) _invocation_parameters['startmodel'] = self.__startmodel( self.__globals_( ) ) _invocation_parameters['specmode'] = 
self.__specmode( self.__globals_( ) ) _invocation_parameters['reffreq'] = self.__reffreq( self.__globals_( ) ) _invocation_parameters['nchan'] = self.__nchan( self.__globals_( ) ) _invocation_parameters['start'] = self.__start( self.__globals_( ) ) _invocation_parameters['width'] = self.__width( self.__globals_( ) ) _invocation_parameters['outframe'] = self.__outframe( self.__globals_( ) ) _invocation_parameters['veltype'] = self.__veltype( self.__globals_( ) ) _invocation_parameters['restfreq'] = self.__restfreq( self.__globals_( ) ) _invocation_parameters['interpolation'] = self.__interpolation( self.__globals_( ) ) _invocation_parameters['perchanweightdensity'] = self.__perchanweightdensity( self.__globals_( ) ) _invocation_parameters['gridder'] = self.__gridder( self.__globals_( ) ) _invocation_parameters['facets'] = self.__facets( self.__globals_( ) ) _invocation_parameters['psfphasecenter'] = self.__psfphasecenter( self.__globals_( ) ) _invocation_parameters['wprojplanes'] = self.__wprojplanes( self.__globals_( ) ) _invocation_parameters['vptable'] = self.__vptable( self.__globals_( ) ) _invocation_parameters['mosweight'] = self.__mosweight( self.__globals_( ) ) _invocation_parameters['aterm'] = self.__aterm( self.__globals_( ) ) _invocation_parameters['psterm'] = self.__psterm( self.__globals_( ) ) _invocation_parameters['wbawp'] = self.__wbawp( self.__globals_( ) ) _invocation_parameters['conjbeams'] = self.__conjbeams( self.__globals_( ) ) _invocation_parameters['cfcache'] = self.__cfcache( self.__globals_( ) ) _invocation_parameters['usepointing'] = self.__usepointing( self.__globals_( ) ) _invocation_parameters['computepastep'] = self.__computepastep( self.__globals_( ) ) _invocation_parameters['rotatepastep'] = self.__rotatepastep( self.__globals_( ) ) _invocation_parameters['pointingoffsetsigdev'] = self.__pointingoffsetsigdev( self.__globals_( ) ) _invocation_parameters['pblimit'] = self.__pblimit( self.__globals_( ) ) _invocation_parameters['normtype'] = self.__normtype( self.__globals_( ) ) _invocation_parameters['deconvolver'] = self.__deconvolver( self.__globals_( ) ) _invocation_parameters['scales'] = self.__scales( self.__globals_( ) ) _invocation_parameters['nterms'] = self.__nterms( self.__globals_( ) ) _invocation_parameters['smallscalebias'] = self.__smallscalebias( self.__globals_( ) ) _invocation_parameters['restoration'] = self.__restoration( self.__globals_( ) ) _invocation_parameters['restoringbeam'] = self.__restoringbeam( self.__globals_( ) ) _invocation_parameters['pbcor'] = self.__pbcor( self.__globals_( ) ) _invocation_parameters['outlierfile'] = self.__outlierfile( self.__globals_( ) ) _invocation_parameters['weighting'] = self.__weighting( self.__globals_( ) ) _invocation_parameters['robust'] = self.__robust( self.__globals_( ) ) _invocation_parameters['noise'] = self.__noise( self.__globals_( ) ) _invocation_parameters['npixels'] = self.__npixels( self.__globals_( ) ) _invocation_parameters['uvtaper'] = self.__uvtaper( self.__globals_( ) ) _invocation_parameters['niter'] = self.__niter( self.__globals_( ) ) _invocation_parameters['gain'] = self.__gain( self.__globals_( ) ) _invocation_parameters['threshold'] = self.__threshold( self.__globals_( ) ) _invocation_parameters['nsigma'] = self.__nsigma( self.__globals_( ) ) _invocation_parameters['cycleniter'] = self.__cycleniter( self.__globals_( ) ) _invocation_parameters['cyclefactor'] = self.__cyclefactor( self.__globals_( ) ) _invocation_parameters['minpsffraction'] = self.__minpsffraction( 
self.__globals_( ) ) _invocation_parameters['maxpsffraction'] = self.__maxpsffraction( self.__globals_( ) ) _invocation_parameters['interactive'] = self.__interactive( self.__globals_( ) ) _invocation_parameters['usemask'] = self.__usemask( self.__globals_( ) ) _invocation_parameters['mask'] = self.__mask( self.__globals_( ) ) _invocation_parameters['pbmask'] = self.__pbmask( self.__globals_( ) ) _invocation_parameters['sidelobethreshold'] = self.__sidelobethreshold( self.__globals_( ) ) _invocation_parameters['noisethreshold'] = self.__noisethreshold( self.__globals_( ) ) _invocation_parameters['lownoisethreshold'] = self.__lownoisethreshold( self.__globals_( ) ) _invocation_parameters['negativethreshold'] = self.__negativethreshold( self.__globals_( ) ) _invocation_parameters['smoothfactor'] = self.__smoothfactor( self.__globals_( ) ) _invocation_parameters['minbeamfrac'] = self.__minbeamfrac( self.__globals_( ) ) _invocation_parameters['cutthreshold'] = self.__cutthreshold( self.__globals_( ) ) _invocation_parameters['growiterations'] = self.__growiterations( self.__globals_( ) ) _invocation_parameters['dogrowprune'] = self.__dogrowprune( self.__globals_( ) ) _invocation_parameters['minpercentchange'] = self.__minpercentchange( self.__globals_( ) ) _invocation_parameters['verbose'] = self.__verbose( self.__globals_( ) ) _invocation_parameters['fastnoise'] = self.__fastnoise( self.__globals_( ) ) _invocation_parameters['restart'] = self.__restart( self.__globals_( ) ) _invocation_parameters['savemodel'] = self.__savemodel( self.__globals_( ) ) _invocation_parameters['calcres'] = self.__calcres( self.__globals_( ) ) _invocation_parameters['calcpsf'] = self.__calcpsf( self.__globals_( ) ) _invocation_parameters['psfcutoff'] = self.__psfcutoff( self.__globals_( ) ) _invocation_parameters['parallel'] = self.__parallel( self.__globals_( ) ) try: with open(_prefile,'w') as _f: for _i in _invocation_parameters: _f.write("%-20s = %s\n" % (_i,noobj(repr(_invocation_parameters[_i])))) _f.write("#ptclean6( ") count = 0 for _i in _invocation_parameters: _f.write("%s=%s" % (_i,noobj(repr(_invocation_parameters[_i])))) count += 1 if count < len(_invocation_parameters): _f.write(",") _f.write(" )\n") except: pass try: _return_result_ = _ptclean6_t( 
_invocation_parameters['vis'],_invocation_parameters['imageprefix'],_invocation_parameters['imagesuffix'],_invocation_parameters['ncpu'],_invocation_parameters['twidth'],_invocation_parameters['doreg'],_invocation_parameters['usephacenter'],_invocation_parameters['reftime'],_invocation_parameters['toTb'],_invocation_parameters['sclfactor'],_invocation_parameters['subregion'],_invocation_parameters['docompress'],_invocation_parameters['overwrite'],_invocation_parameters['selectdata'],_invocation_parameters['field'],_invocation_parameters['spw'],_invocation_parameters['timerange'],_invocation_parameters['uvrange'],_invocation_parameters['antenna'],_invocation_parameters['scan'],_invocation_parameters['observation'],_invocation_parameters['intent'],_invocation_parameters['datacolumn'],_invocation_parameters['imagename'],_invocation_parameters['imsize'],_invocation_parameters['cell'],_invocation_parameters['phasecenter'],_invocation_parameters['stokes'],_invocation_parameters['projection'],_invocation_parameters['startmodel'],_invocation_parameters['specmode'],_invocation_parameters['reffreq'],_invocation_parameters['nchan'],_invocation_parameters['start'],_invocation_parameters['width'],_invocation_parameters['outframe'],_invocation_parameters['veltype'],_invocation_parameters['restfreq'],_invocation_parameters['interpolation'],_invocation_parameters['perchanweightdensity'],_invocation_parameters['gridder'],_invocation_parameters['facets'],_invocation_parameters['psfphasecenter'],_invocation_parameters['wprojplanes'],_invocation_parameters['vptable'],_invocation_parameters['mosweight'],_invocation_parameters['aterm'],_invocation_parameters['psterm'],_invocation_parameters['wbawp'],_invocation_parameters['conjbeams'],_invocation_parameters['cfcache'],_invocation_parameters['usepointing'],_invocation_parameters['computepastep'],_invocation_parameters['rotatepastep'],_invocation_parameters['pointingoffsetsigdev'],_invocation_parameters['pblimit'],_invocation_parameters['normtype'],_invocation_parameters['deconvolver'],_invocation_parameters['scales'],_invocation_parameters['nterms'],_invocation_parameters['smallscalebias'],_invocation_parameters['restoration'],_invocation_parameters['restoringbeam'],_invocation_parameters['pbcor'],_invocation_parameters['outlierfile'],_invocation_parameters['weighting'],_invocation_parameters['robust'],_invocation_parameters['noise'],_invocation_parameters['npixels'],_invocation_parameters['uvtaper'],_invocation_parameters['niter'],_invocation_parameters['gain'],_invocation_parameters['threshold'],_invocation_parameters['nsigma'],_invocation_parameters['cycleniter'],_invocation_parameters['cyclefactor'],_invocation_parameters['minpsffraction'],_invocation_parameters['maxpsffraction'],_invocation_parameters['interactive'],_invocation_parameters['usemask'],_invocation_parameters['mask'],_invocation_parameters['pbmask'],_invocation_parameters['sidelobethreshold'],_invocation_parameters['noisethreshold'],_invocation_parameters['lownoisethreshold'],_invocation_parameters['negativethreshold'],_invocation_parameters['smoothfactor'],_invocation_parameters['minbeamfrac'],_invocation_parameters['cutthreshold'],_invocation_parameters['growiterations'],_invocation_parameters['dogrowprune'],_invocation_parameters['minpercentchange'],_invocation_parameters['verbose'],_invocation_parameters['fastnoise'],_invocation_parameters['restart'],_invocation_parameters['savemodel'],_invocation_parameters['calcres'],_invocation_parameters['calcpsf'],_invocation_parameters['psfcutoff'],_in
vocation_parameters['parallel'] ) except Exception as e: from traceback import format_exc from casatasks import casalog casalog.origin('ptclean6') casalog.post("Exception Reported: Error in ptclean6: %s" % str(e),'SEVERE') casalog.post(format_exc( )) _return_result_ = False try: os.rename(_prefile,_postfile) except: pass return _return_result_ ptclean6 = _ptclean6( )
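# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated CASA task above): the
# ptclean6 wrapper fills _invocation_parameters with the pattern
# `computed_default(params) if arg is None else arg`, so an explicit falsy
# value (0, '', False) passed by the caller is preserved and only a literal
# None triggers the default lookup. A minimal, self-contained illustration
# of that defaulting scheme, using hypothetical parameter names:

def _default_gain(params):
    # Hypothetical default resolver; the real task derives each default
    # from its parameter specification.
    return 0.1

def invoke(gain=None, niter=None):
    params = {}
    params['gain'] = _default_gain(params) if gain is None else gain
    params['niter'] = 0 if niter is None else niter
    return params

assert invoke() == {'gain': 0.1, 'niter': 0}
assert invoke(gain=0.0) == {'gain': 0.0, 'niter': 0}  # explicit 0.0 survives
# ---------------------------------------------------------------------------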
""" This whole module and approach is a hack. It's not well documented because it's not official. But the horse is out of the barn on this stuff and we have to do something. """ import base64, hashlib, random, re, string, ssl from urllib.parse import urlencode, urlparse, parse_qs import aiohttp from . import logging as logger from ._version import get_version CHARS = f'{string.digits}{string.ascii_letters}' # When parsing the HTML output of the login page RE_INPUT = re.compile(r'<\s*input\s+[^>]*>', re.IGNORECASE) RE_NAME = re.compile(r'name\s*=\s*"?(?P<name>[^ "]+)?', re.IGNORECASE) RE_VALUE = re.compile(r'value\s*=\s*"?(?P<value>[^ "]+)?', re.IGNORECASE) MAX_FIELDS = 50 async def get_auth_data(identity, credential): """ Accepts identity (a.k.a email) and credential (a.k.a password) and returns your tokens. This currently does not support mfa. Returns: dict { "access_token": "<access_token>", "token_type": "bearer", "expires_in": <seconds>, # Have only seen 45 days "refresh_token": "<refresh_token>", "created_at": <epoch> } """ our_code, challenge1, challenge2 = _get_challengers() v = f'carson/{get_version().version}' async with aiohttp.ClientSession(headers={'User-Agent': v}) as session: issuer, their_code, redirect_uri = await get_and_post_login_page(session, identity, credential, challenge1, challenge2) tokens = await post_grant_authorization_code(session, issuer, our_code, their_code, redirect_uri, challenge2) return tokens async def get_and_post_login_page(session, identity, credential, challenge1, challenge2): # TODO: At one time, capping TLS to 1.2 was required but this is probably not needed anymore. ssl_context = ssl.create_default_context() ssl_context.maximum_version = ssl.TLSVersion.TLSv1_2 query = { 'client_id': 'ownerapi', 'code_challenge': challenge1, 'code_challenge_method': 'S256', 'locale': 'en', 'prompt': 'login', 'redirect_uri': 'https://auth.tesla.com/void/callback', 'response_type': 'code', 'scope': 'openid email offline_access', 'state': challenge2 } auth_url = 'https://auth-global.tesla.com/oauth2/v3/authorize' request_url = f'{auth_url}?{urlencode(query)}' response_url = None login_form = {} async with session.get(request_url, ssl=ssl_context) as response: # _debug_response(response) response_url = f'{response.request_info.url}' login_form = parse_html(await response.text()) login_form.update({'identity': identity, 'credential': credential}) async with session.post(response_url, data=login_form, ssl=ssl_context, allow_redirects=False) as response: # _debug_response(response) _loc = response.headers.get('location') txt = await response.text() if not _loc: if 'captcha' in txt: raise Exception('Looks like captcha is required.') raise Exception('Did not get a redirect from posting credentials') location = urlparse(_loc) mfa = '/oauth2/v3/authorize/mfa/verify' in txt assert not mfa, 'Not supporting MFA at this time.' 
query = parse_qs(location.query) their_code = query.get('code', [None])[0] issuer = query.get('issuer', [None])[0] if not their_code: raise Exception('Did not get a code back from posting credentials.') redirect_uri = f'{location.scheme}://{location.netloc}{location.path}' return issuer, their_code, redirect_uri async def post_grant_authorization_code(session, issuer, our_code, their_code, redirect_uri, challenge2): form = { 'grant_type': 'authorization_code', 'client_id': 'ownerapi', 'code_verifier': our_code, 'code': their_code, 'redirect_uri': redirect_uri } issuer = issuer or 'https://auth.tesla.com/oauth2/v3' issuer_url = f'{issuer}/token' async with session.post(issuer_url, json=form) as response: tokens = await response.json() if 'state' not in tokens or str(tokens['state']) != challenge2: logger.error(f'Returned state ({tokens.get('state', None)!r}) did not match expected value ({challenge2!r})') raise Exception('Returned authorization_code state did not match expected value') # post_grant_jwt url = 'https://owner-api.teslamotors.com/oauth/token' headers = {'Authorization': f'Bearer {tokens.get('access_token')}'} form = { 'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer', 'client_id': _DISCOVERED_VAL, } async with session.post(url, headers=headers, json=form) as response: # _debug_response(response) return await response.json() def parse_html(body_src): form = {} field_nbr = 0 input_match = RE_INPUT.search(body_src) while input_match: field_nbr += 1 if field_nbr > MAX_FIELDS: raise ValueError(f'Too many input fields found. max={MAX_FIELDS:,}') b, e = input_match.span() input_src, body_src = body_src[b:e], body_src[e:] input_match = RE_INPUT.search(body_src) name_match = RE_NAME.search(input_src) if not name_match or not name_match.group('name'): continue value_match = RE_VALUE.search(input_src) form[name_match.group('name')] = value_match and value_match.group('value') or '' return form def _get_challengers(): code = ''.join(random.choices(CHARS, k=112)) sha = hashlib.sha256() sha.update(code.encode()) challenge1 = base64.b64encode(sha.digest(), altchars=b'-_').decode().replace('=', '') sha.update(''.join(random.choices(CHARS, k=112)).encode()) challenge2 = base64.b64encode(sha.digest(), altchars=b'-_').decode().replace('=', '') return code, challenge1, challenge2 _DISCOVERED_VAL = ''.join(hex(int(val))[2:] for val in """ 08 01 05 02 07 12 15 15 00 06 08 04 03 12 08 06 03 04 15 13 12 00 09 14 08 10 12 00 10 11 14 15 11 04 06 10 12 08 04 09 15 03 08 15 14 01 14 04 03 01 12 02 14 15 02 01 00 06 07 09 06 03 08 04 """.split()) def _debug_response(response): req = response.request_info logger.debug('Request') logger.debug(f'{req.method} {req.url}') for key, val in req.headers.items(): logger.debug(f'{key}: {val}') logger.debug('') logger.debug('Response') logger.debug(f'HTTP {response.status} {response.reason}') for key, val in response.raw_headers: logger.debug(f'{key.decode()}: {val.decode()}') logger.debug('')
""" This whole module and approach is a hack. It's not well documented because it's not official. But the horse is out of the barn on this stuff and we have to do something. """ import base64, hashlib, random, re, string, ssl from urllib.parse import urlencode, urlparse, parse_qs import aiohttp from . import logging as logger from ._version import get_version CHARS = f'{string.digits}{string.ascii_letters}' # When parsing the HTML output of the login page RE_INPUT = re.compile(r'<\s*input\s+[^>]*>', re.IGNORECASE) RE_NAME = re.compile(r'name\s*=\s*"?(?P<name>[^ "]+)?', re.IGNORECASE) RE_VALUE = re.compile(r'value\s*=\s*"?(?P<value>[^ "]+)?', re.IGNORECASE) MAX_FIELDS = 50 async def get_auth_data(identity, credential): """ Accepts identity (a.k.a email) and credential (a.k.a password) and returns your tokens. This currently does not support mfa. Returns: dict { "access_token": "<access_token>", "token_type": "bearer", "expires_in": <seconds>, # Have only seen 45 days "refresh_token": "<refresh_token>", "created_at": <epoch> } """ our_code, challenge1, challenge2 = _get_challengers() v = f'carson/{get_version().version}' async with aiohttp.ClientSession(headers={'User-Agent': v}) as session: issuer, their_code, redirect_uri = await get_and_post_login_page(session, identity, credential, challenge1, challenge2) tokens = await post_grant_authorization_code(session, issuer, our_code, their_code, redirect_uri, challenge2) return tokens async def get_and_post_login_page(session, identity, credential, challenge1, challenge2): # TODO: At one time, capping TLS to 1.2 was required but this is probably not needed anymore. ssl_context = ssl.create_default_context() ssl_context.maximum_version = ssl.TLSVersion.TLSv1_2 query = { 'client_id': 'ownerapi', 'code_challenge': challenge1, 'code_challenge_method': 'S256', 'locale': 'en', 'prompt': 'login', 'redirect_uri': 'https://auth.tesla.com/void/callback', 'response_type': 'code', 'scope': 'openid email offline_access', 'state': challenge2 } auth_url = 'https://auth-global.tesla.com/oauth2/v3/authorize' request_url = f'{auth_url}?{urlencode(query)}' response_url = None login_form = {} async with session.get(request_url, ssl=ssl_context) as response: # _debug_response(response) response_url = f'{response.request_info.url}' login_form = parse_html(await response.text()) login_form.update({'identity': identity, 'credential': credential}) async with session.post(response_url, data=login_form, ssl=ssl_context, allow_redirects=False) as response: # _debug_response(response) _loc = response.headers.get('location') txt = await response.text() if not _loc: if 'captcha' in txt: raise Exception('Looks like captcha is required.') raise Exception('Did not get a redirect from posting credentials') location = urlparse(_loc) mfa = '/oauth2/v3/authorize/mfa/verify' in txt assert not mfa, 'Not supporting MFA at this time.' 
query = parse_qs(location.query) their_code = query.get('code', [None])[0] issuer = query.get('issuer', [None])[0] if not their_code: raise Exception('Did not get a code back from posting credentials.') redirect_uri = f'{location.scheme}://{location.netloc}{location.path}' return issuer, their_code, redirect_uri async def post_grant_authorization_code(session, issuer, our_code, their_code, redirect_uri, challenge2): form = { 'grant_type': 'authorization_code', 'client_id': 'ownerapi', 'code_verifier': our_code, 'code': their_code, 'redirect_uri': redirect_uri } issuer = issuer or 'https://auth.tesla.com/oauth2/v3' issuer_url = f'{issuer}/token' async with session.post(issuer_url, json=form) as response: tokens = await response.json() if 'state' not in tokens or str(tokens['state']) != challenge2: logger.error(f'Returned state ({tokens.get("state", None)!r}) did not match expected value ({challenge2!r})') raise Exception('Returned authorization_code state did not match expected value') # post_grant_jwt url = 'https://owner-api.teslamotors.com/oauth/token' headers = {'Authorization': f'Bearer {tokens.get("access_token")}'} form = { 'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer', 'client_id': _DISCOVERED_VAL, } async with session.post(url, headers=headers, json=form) as response: # _debug_response(response) return await response.json() def parse_html(body_src): form = {} field_nbr = 0 input_match = RE_INPUT.search(body_src) while input_match: field_nbr += 1 if field_nbr > MAX_FIELDS: raise ValueError(f'Too many input fields found. max={MAX_FIELDS:,}') b, e = input_match.span() input_src, body_src = body_src[b:e], body_src[e:] input_match = RE_INPUT.search(body_src) name_match = RE_NAME.search(input_src) if not name_match or not name_match.group('name'): continue value_match = RE_VALUE.search(input_src) form[name_match.group('name')] = value_match and value_match.group('value') or '' return form def _get_challengers(): code = ''.join(random.choices(CHARS, k=112)) sha = hashlib.sha256() sha.update(code.encode()) challenge1 = base64.b64encode(sha.digest(), altchars=b'-_').decode().replace('=', '') sha.update(''.join(random.choices(CHARS, k=112)).encode()) challenge2 = base64.b64encode(sha.digest(), altchars=b'-_').decode().replace('=', '') return code, challenge1, challenge2 _DISCOVERED_VAL = ''.join(hex(int(val))[2:] for val in """ 08 01 05 02 07 12 15 15 00 06 08 04 03 12 08 06 03 04 15 13 12 00 09 14 08 10 12 00 10 11 14 15 11 04 06 10 12 08 04 09 15 03 08 15 14 01 14 04 03 01 12 02 14 15 02 01 00 06 07 09 06 03 08 04 """.split()) def _debug_response(response): req = response.request_info logger.debug('Request') logger.debug(f'{req.method} {req.url}') for key, val in req.headers.items(): logger.debug(f'{key}: {val}') logger.debug('') logger.debug('Response') logger.debug(f'HTTP {response.status} {response.reason}') for key, val in response.raw_headers: logger.debug(f'{key.decode()}: {val.decode()}') logger.debug('')
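# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): standard RFC 7636 PKCE
# S256 generation, for comparison. The module's _get_challengers rolls its
# own variant (a 112-character verifier, and a reused sha256 object so the
# state value is a cumulative digest); this is the textbook form of the idea.
import base64
import hashlib
import secrets

def make_pkce_pair():
    # 43-128 character URL-safe verifier; challenge = BASE64URL(SHA256(verifier))
    # with the '=' padding stripped, matching code_challenge_method='S256'.
    verifier = base64.urlsafe_b64encode(secrets.token_bytes(64)).rstrip(b'=').decode()
    digest = hashlib.sha256(verifier.encode()).digest()
    challenge = base64.urlsafe_b64encode(digest).rstrip(b'=').decode()
    return verifier, challenge

_v, _c = make_pkce_pair()
assert 43 <= len(_v) <= 128  # RFC 7636 length bounds
# ---------------------------------------------------------------------------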
import argparse
import logging
from typing import Tuple
from deepdiff import DeepDiff
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
import pprint
from concurrent.futures import ThreadPoolExecutor
import os
import random
import math


def diff_response(args: Tuple[str, str]):
    # Endpoint
    # /cpes/:vendor/:product
    path = f'cpes/{args[0]}/{args[1]}'

    session = requests.Session()
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[503, 504])
    session.mount("http://", HTTPAdapter(max_retries=retries))

    try:
        # Route the GETs through the retry-mounted session so the Retry
        # policy actually applies to these requests.
        response_old = session.get(
            f'http://127.0.0.1:1325/{path}', timeout=(2.0, 30.0)).json()
        response_new = session.get(
            f'http://127.0.0.1:1326/{path}', timeout=(2.0, 30.0)).json()
    except requests.exceptions.ConnectionError as e:
        logger.error(f'Failed to Connection..., err: {e}')
        exit(1)
    except requests.exceptions.ReadTimeout as e:
        logger.error(
            f'Failed to Read Response..., err: {e}, args: {args}')
        # Nothing to compare if either read timed out; skip this pair.
        return
    except Exception as e:
        logger.error(f'Failed to GET request..., err: {e}')
        exit(1)

    diff = DeepDiff(response_old, response_new, ignore_order=True)
    if diff != {}:
        logger.warning(
            f'There is a difference between old and new(or RDB and Redis):\n {pprint.pformat({'mode': 'cpes', 'args': args, 'diff': diff}, indent=2)}')


parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=['cpes'],
                    help='Specify the mode to test.')
parser.add_argument("--sample_rate", type=float, default=0.001,
                    help="Adjust the rate of data used for testing (len(test_data) * sample_rate)")
parser.add_argument(
    '--debug', action=argparse.BooleanOptionalAction, help='print debug message')
args = parser.parse_args()

logger = logging.getLogger(__name__)
stream_handler = logging.StreamHandler()

if args.debug:
    logger.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
    stream_handler.setLevel(logging.INFO)

formatter = logging.Formatter(
    '%(levelname)s[%(asctime)s] %(message)s', "%m-%d|%H:%M:%S")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)

logger.info(f'start server mode test(mode: {args.mode})')

list_path = "integration/cpe.txt"
if not os.path.isfile(list_path):
    logger.error(f'Failed to find list path..., list_path: {list_path}')
    exit(1)

with open(list_path) as f:
    # Named `targets` to avoid shadowing the builtin list type.
    targets = [s.strip().split("|", 1) for s in f.readlines()]

targets = random.sample(targets, math.ceil(len(targets) * args.sample_rate))

with ThreadPoolExecutor() as executor:
    ins = ((e[0], e[1]) for e in targets)
    executor.map(diff_response, ins)
import argparse
import logging
from typing import Tuple
from deepdiff import DeepDiff
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
import pprint
from concurrent.futures import ThreadPoolExecutor
import os
import random
import math


def diff_response(args: Tuple[str, str]):
    # Endpoint
    # /cpes/:vendor/:product
    path = f'cpes/{args[0]}/{args[1]}'

    session = requests.Session()
    retries = Retry(total=5, backoff_factor=1, status_forcelist=[503, 504])
    session.mount("http://", HTTPAdapter(max_retries=retries))

    try:
        # Route the GETs through the retry-mounted session so the Retry
        # policy actually applies to these requests.
        response_old = session.get(
            f'http://127.0.0.1:1325/{path}', timeout=(2.0, 30.0)).json()
        response_new = session.get(
            f'http://127.0.0.1:1326/{path}', timeout=(2.0, 30.0)).json()
    except requests.exceptions.ConnectionError as e:
        logger.error(f'Failed to Connection..., err: {e}')
        exit(1)
    except requests.exceptions.ReadTimeout as e:
        logger.error(
            f'Failed to Read Response..., err: {e}, args: {args}')
        # Nothing to compare if either read timed out; skip this pair.
        return
    except Exception as e:
        logger.error(f'Failed to GET request..., err: {e}')
        exit(1)

    diff = DeepDiff(response_old, response_new, ignore_order=True)
    if diff != {}:
        logger.warning(
            f'There is a difference between old and new(or RDB and Redis):\n {pprint.pformat({"mode": "cpes", "args": args, "diff": diff}, indent=2)}')


parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=['cpes'],
                    help='Specify the mode to test.')
parser.add_argument("--sample_rate", type=float, default=0.001,
                    help="Adjust the rate of data used for testing (len(test_data) * sample_rate)")
parser.add_argument(
    '--debug', action=argparse.BooleanOptionalAction, help='print debug message')
args = parser.parse_args()

logger = logging.getLogger(__name__)
stream_handler = logging.StreamHandler()

if args.debug:
    logger.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
    stream_handler.setLevel(logging.INFO)

formatter = logging.Formatter(
    '%(levelname)s[%(asctime)s] %(message)s', "%m-%d|%H:%M:%S")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)

logger.info(f'start server mode test(mode: {args.mode})')

list_path = "integration/cpe.txt"
if not os.path.isfile(list_path):
    logger.error(f'Failed to find list path..., list_path: {list_path}')
    exit(1)

with open(list_path) as f:
    # Named `targets` to avoid shadowing the builtin list type.
    targets = [s.strip().split("|", 1) for s in f.readlines()]

targets = random.sample(targets, math.ceil(len(targets) * args.sample_rate))

with ThreadPoolExecutor() as executor:
    ins = ((e[0], e[1]) for e in targets)
    executor.map(diff_response, ins)
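# ---------------------------------------------------------------------------
# Illustrative sketch (standalone; the helper name is hypothetical): the
# Session + Retry pattern the script above relies on. Retries only apply to
# requests issued through the session the adapter is mounted on, which is
# why the script routes its GETs through session.get rather than the
# module-level requests.get.
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry

def make_retrying_session(total=5, backoff_factor=1):
    session = requests.Session()
    retries = Retry(total=total, backoff_factor=backoff_factor,
                    status_forcelist=[503, 504])
    adapter = HTTPAdapter(max_retries=retries)
    # Mount for both schemes so the policy applies regardless of URL.
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    return session

session = make_retrying_session()
# session.get('http://127.0.0.1:1325/health', timeout=(2.0, 30.0))
# ---------------------------------------------------------------------------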
"""Pair plot between variables of latent space""" from configparser import ConfigParser, ExtendedInterpolation import glob import time import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from sdss.utils.managefiles import FileDirectory from sdss.utils.configfile import ConfigurationFile ############################################################################### start_time = time.time() ############################################################################### parser = ConfigParser(interpolation=ExtendedInterpolation()) config_file_name = "pair_plots.ini" parser.read(f"{config_file_name}") config = ConfigurationFile() manage_files = FileDirectory() ############################################################################### print(f"Load metadata", end="\n") data_directory = parser.get("directory", "data") science_df = parser.get("file", "science") science_df = pd.read_csv( f"{data_directory}/{science_df}", index_col="specobjid" ) bin_directory = parser.get("directory", "bin_data") specobjid_name = parser.get("file", "specobjid") specobjid = np.load(f"{bin_directory}/{specobjid_name}") bin_df = science_df.loc[specobjid[:, 1]] del science_df ############################################################################### print(f"Load embedding and latent representations", end="\n") latent_directory = parser.get("directory", "latent") latent_directories = glob.glob(f"{latent_directory}/*/") latent_name = parser.get("file", "latent") _ = [ manage_files.file_exists( f"{latent_location}/{latent_name}", exit_program=True ) for latent_location in latent_directories ] bin_id = parser.get("common", "bin") # set plot parameters parameters_of_plot = config.section_to_dictionary( parser.items("plot"), value_separators=[","] ) size = config.entry_to_list(parser.get("plot", "size"), float, ",") size = tuple(size) parameters_of_plot["size"] = size fig, ax = plt.subplots(figsize=size, tight_layout=True) # flags number_latent_variables = None bin_df_of_plot = None models_ids = [model_id.split("/")[-2] for model_id in latent_directories] for model_idx, latent_directory in enumerate(latent_directories): latent = np.load(f"{latent_directory}/{latent_name}") number_latent_variables = latent.shape[1] # load latent representation to data frame for idx in range(number_latent_variables): bin_df[f"{idx:02d}Latent"] = latent[:, idx] print(f"model {models_ids[model_idx]}: pair plots", end="\n") for hue in parameters_of_plot["hues"]: bin_df_of_plot = bin_df[bin_df[hue] != "undefined"] for latent_x in range(number_latent_variables): for latent_y in range(latent_x, number_latent_variables): if latent_x == latent_y: continue print( f"Pair plot: {latent_x:02d} vs {latent_y:02d}" f"Hue: {hue}", end="\r", ) # pair_plot = sns.scatterplot( sns.scatterplot( x=f"{latent_x:02d}Latent", y=f"{latent_y:02d}Latent", ax=ax, data=bin_df_of_plot, hue=hue, alpha=parameters_of_plot["alpha"], s=parameters_of_plot["marker_size"], edgecolors=parameters_of_plot["edgecolors"], ) save_to = f"{latent_directory}/pair_plots" manage_files.check_directory(save_to, exit_program=False) fig.savefig( f"{save_to}/" f"pair_{latent_x:02d}_{latent_y:02d}_" f"{hue}.{parameters_of_plot["format"]}" ) ax.clear() ########################################################################### print(f"Save configuration file", end="\n") with open(f"{latent_directory}/{config_file_name}", "w") as config_file: parser.write(config_file) ############################################################################### 
finish_time = time.time() print(f"\nRun time: {finish_time - start_time:.2f}")
"""Pair plot between variables of latent space""" from configparser import ConfigParser, ExtendedInterpolation import glob import time import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from sdss.utils.managefiles import FileDirectory from sdss.utils.configfile import ConfigurationFile ############################################################################### start_time = time.time() ############################################################################### parser = ConfigParser(interpolation=ExtendedInterpolation()) config_file_name = "pair_plots.ini" parser.read(f"{config_file_name}") config = ConfigurationFile() manage_files = FileDirectory() ############################################################################### print(f"Load metadata", end="\n") data_directory = parser.get("directory", "data") science_df = parser.get("file", "science") science_df = pd.read_csv( f"{data_directory}/{science_df}", index_col="specobjid" ) bin_directory = parser.get("directory", "bin_data") specobjid_name = parser.get("file", "specobjid") specobjid = np.load(f"{bin_directory}/{specobjid_name}") bin_df = science_df.loc[specobjid[:, 1]] del science_df ############################################################################### print(f"Load embedding and latent representations", end="\n") latent_directory = parser.get("directory", "latent") latent_directories = glob.glob(f"{latent_directory}/*/") latent_name = parser.get("file", "latent") _ = [ manage_files.file_exists( f"{latent_location}/{latent_name}", exit_program=True ) for latent_location in latent_directories ] bin_id = parser.get("common", "bin") # set plot parameters parameters_of_plot = config.section_to_dictionary( parser.items("plot"), value_separators=[","] ) size = config.entry_to_list(parser.get("plot", "size"), float, ",") size = tuple(size) parameters_of_plot["size"] = size fig, ax = plt.subplots(figsize=size, tight_layout=True) # flags number_latent_variables = None bin_df_of_plot = None models_ids = [model_id.split("/")[-2] for model_id in latent_directories] for model_idx, latent_directory in enumerate(latent_directories): latent = np.load(f"{latent_directory}/{latent_name}") number_latent_variables = latent.shape[1] # load latent representation to data frame for idx in range(number_latent_variables): bin_df[f"{idx:02d}Latent"] = latent[:, idx] print(f"model {models_ids[model_idx]}: pair plots", end="\n") for hue in parameters_of_plot["hues"]: bin_df_of_plot = bin_df[bin_df[hue] != "undefined"] for latent_x in range(number_latent_variables): for latent_y in range(latent_x, number_latent_variables): if latent_x == latent_y: continue print( f"Pair plot: {latent_x:02d} vs {latent_y:02d}" f"Hue: {hue}", end="\r", ) # pair_plot = sns.scatterplot( sns.scatterplot( x=f"{latent_x:02d}Latent", y=f"{latent_y:02d}Latent", ax=ax, data=bin_df_of_plot, hue=hue, alpha=parameters_of_plot["alpha"], s=parameters_of_plot["marker_size"], edgecolors=parameters_of_plot["edgecolors"], ) save_to = f"{latent_directory}/pair_plots" manage_files.check_directory(save_to, exit_program=False) fig.savefig( f"{save_to}/" f"pair_{latent_x:02d}_{latent_y:02d}_" f"{hue}.{parameters_of_plot['format']}" ) ax.clear() ########################################################################### print(f"Save configuration file", end="\n") with open(f"{latent_directory}/{config_file_name}", "w") as config_file: parser.write(config_file) ############################################################################### 
finish_time = time.time() print(f"\nRun time: {finish_time - start_time:.2f}")
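# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the script above): the nested latent_x /
# latent_y loops enumerate unordered pairs with x < y by skipping the
# diagonal; itertools.combinations expresses the same iteration directly.
from itertools import combinations

number_latent_variables = 4  # hypothetical latent dimensionality
for latent_x, latent_y in combinations(range(number_latent_variables), 2):
    print(f"pair_{latent_x:02d}_{latent_y:02d}")
# ---------------------------------------------------------------------------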
#!/usr/bin/env python3 """Generate an updated requirements_all.txt.""" import difflib import importlib import os from pathlib import Path import pkgutil import re import sys from homeassistant.util.yaml.loader import load_yaml from script.hassfest.model import Integration COMMENT_REQUIREMENTS = ( "Adafruit_BBIO", "avea", # depends on bluepy "avion", "beacontools", "beewi_smartclim", # depends on bluepy "blinkt", "bluepy", "bme680", "decora", "decora_wifi", "envirophat", "evdev", "face_recognition", "i2csense", "opencv-python-headless", "pybluez", "pycups", "PySwitchbot", "pySwitchmate", "python-eq3bt", "python-gammu", "python-lirc", "pyuserinput", "raspihats", "rpi-rf", "RPi.GPIO", "smbus-cffi", "tensorflow", "tf-models-official", "VL53L1X2", ) IGNORE_PIN = ("colorlog>2.1,<3", "urllib3") URL_PIN = ( "https://developers.home-assistant.io/docs/" "creating_platform_code_review.html#1-requirements" ) CONSTRAINT_PATH = os.path.join( os.path.dirname(__file__), "../homeassistant/package_constraints.txt" ) CONSTRAINT_BASE = """ pycryptodome>=3.6.6 # Constrain urllib3 to ensure we deal with CVE-2019-11236 & CVE-2019-11324 urllib3>=1.24.3 # Constrain H11 to ensure we get a new enough version to support non-rfc line endings h11>=0.12.0 # Constrain httplib2 to protect against GHSA-93xj-8mrv-444m # https://github.com/advisories/GHSA-93xj-8mrv-444m httplib2>=0.19.0 # This is a old unmaintained library and is replaced with pycryptodome pycrypto==1000000000.0.0 # To remove reliance on typing btlewrap>=0.0.10 # This overrides a built-in Python package enum34==1000000000.0.0 typing==1000000000.0.0 uuid==1000000000.0.0 """ IGNORE_PRE_COMMIT_HOOK_ID = ( "check-executables-have-shebangs", "check-json", "no-commit-to-branch", "prettier", "python-typing-update", ) def has_tests(module: str): """Test if a module has tests. Module format: homeassistant.components.hue Test if exists: tests/components/hue """ path = Path(module.replace(".", "/").replace("homeassistant", "tests")) if not path.exists(): return False if not path.is_dir(): return True # Dev environments might have stale directories around # from removed tests. Check for that. content = [f.name for f in path.glob("*")] # Directories need to contain more than `__pycache__` # to exist in Git and so be seen by CI. return content != ["__pycache__"] def explore_module(package, explore_children): """Explore the modules.""" module = importlib.import_module(package) found = [] if not hasattr(module, "__path__"): return found for _, name, _ in pkgutil.iter_modules(module.__path__, f"{package}."): found.append(name) if explore_children: found.extend(explore_module(name, False)) return found def core_requirements(): """Gather core requirements out of setup.py.""" reqs_raw = re.search( r"REQUIRES = \[(.*?)\]", Path("setup.py").read_text(), re.S ).group(1) return [x[1] for x in re.findall(r"(['\"])(.*?)\1", reqs_raw)] def gather_recursive_requirements(domain, seen=None): """Recursively gather requirements from a module.""" if seen is None: seen = set() seen.add(domain) integration = Integration(Path(f"homeassistant/components/{domain}")) integration.load_manifest() reqs = set(integration.requirements) for dep_domain in integration.dependencies: reqs.update(gather_recursive_requirements(dep_domain, seen)) return reqs def comment_requirement(req): """Comment out requirement. 
Some don't install on all systems.""" return any(ign.lower() in req.lower() for ign in COMMENT_REQUIREMENTS) def gather_modules(): """Collect the information.""" reqs = {} errors = [] gather_requirements_from_manifests(errors, reqs) gather_requirements_from_modules(errors, reqs) for key in reqs: reqs[key] = sorted(reqs[key], key=lambda name: (len(name.split(".")), name)) if errors: print("******* ERROR") print("Errors while importing: ", ", ".join(errors)) return None return reqs def gather_requirements_from_manifests(errors, reqs): """Gather all of the requirements from manifests.""" integrations = Integration.load_dir(Path("homeassistant/components")) for domain in sorted(integrations): integration = integrations[domain] if not integration.manifest: errors.append(f"The manifest for integration {domain} is invalid.") continue if integration.disabled: continue process_requirements( errors, integration.requirements, f"homeassistant.components.{domain}", reqs ) def gather_requirements_from_modules(errors, reqs): """Collect the requirements from the modules directly.""" for package in sorted( explore_module("homeassistant.scripts", True) + explore_module("homeassistant.auth", True) ): try: module = importlib.import_module(package) except ImportError as err: print(f"{package.replace(".", "/")}.py: {err}") errors.append(package) continue if getattr(module, "REQUIREMENTS", None): process_requirements(errors, module.REQUIREMENTS, package, reqs) def process_requirements(errors, module_requirements, package, reqs): """Process all of the requirements.""" for req in module_requirements: if "://" in req: errors.append(f"{package}[Only pypi dependencies are allowed: {req}]") if req.partition("==")[1] == "" and req not in IGNORE_PIN: errors.append(f"{package}[Please pin requirement {req}, see {URL_PIN}]") reqs.setdefault(req, []).append(package) def generate_requirements_list(reqs): """Generate a pip file based on requirements.""" output = [] for pkg, requirements in sorted(reqs.items(), key=lambda item: item[0]): for req in sorted(requirements): output.append(f"\n# {req}") if comment_requirement(pkg): output.append(f"\n# {pkg}\n") else: output.append(f"\n{pkg}\n") return "".join(output) def requirements_output(reqs): """Generate output for requirements.""" output = [ "-c homeassistant/package_constraints.txt\n", "\n", "# Home Assistant Core\n", ] output.append("\n".join(core_requirements())) output.append("\n") return "".join(output) def requirements_all_output(reqs): """Generate output for requirements_all.""" output = [ "# Home Assistant Core, full dependency set\n", "-r requirements.txt\n", ] output.append(generate_requirements_list(reqs)) return "".join(output) def requirements_test_all_output(reqs): """Generate output for test_requirements.""" output = [ "# Home Assistant tests, full dependency set\n", f"# Automatically generated by {Path(__file__).name}, do not edit\n", "\n", "-r requirements_test.txt\n", ] filtered = { requirement: modules for requirement, modules in reqs.items() if any( # Always install requirements that are not part of integrations not mdl.startswith("homeassistant.components.") or # Install tests for integrations that have tests has_tests(mdl) for mdl in modules ) } output.append(generate_requirements_list(filtered)) return "".join(output) def requirements_pre_commit_output(): """Generate output for pre-commit dependencies.""" source = ".pre-commit-config.yaml" pre_commit_conf = load_yaml(source) reqs = [] for repo in (x for x in pre_commit_conf["repos"] if x.get("rev")): for 
hook in repo["hooks"]: if hook["id"] not in IGNORE_PRE_COMMIT_HOOK_ID: reqs.append(f"{hook["id"]}=={repo["rev"].lstrip("v")}") reqs.extend(x for x in hook.get("additional_dependencies", ())) output = [ f"# Automatically generated " f"from {source} by {Path(__file__).name}, do not edit", "", ] output.extend(sorted(reqs)) return "\n".join(output) + "\n" def gather_constraints(): """Construct output for constraint file.""" return ( "\n".join( sorted( { *core_requirements(), *gather_recursive_requirements("default_config"), *gather_recursive_requirements("mqtt"), } ) + [""] ) + CONSTRAINT_BASE ) def diff_file(filename, content): """Diff a file.""" return list( difflib.context_diff( [f"{line}\n" for line in Path(filename).read_text().split("\n")], [f"{line}\n" for line in content.split("\n")], filename, "generated", ) ) def main(validate): """Run the script.""" if not os.path.isfile("requirements_all.txt"): print("Run this from HA root dir") return 1 data = gather_modules() if data is None: return 1 reqs_file = requirements_output(data) reqs_all_file = requirements_all_output(data) reqs_test_all_file = requirements_test_all_output(data) reqs_pre_commit_file = requirements_pre_commit_output() constraints = gather_constraints() files = ( ("requirements.txt", reqs_file), ("requirements_all.txt", reqs_all_file), ("requirements_test_pre_commit.txt", reqs_pre_commit_file), ("requirements_test_all.txt", reqs_test_all_file), ("homeassistant/package_constraints.txt", constraints), ) if validate: errors = [] for filename, content in files: diff = diff_file(filename, content) if diff: errors.append("".join(diff)) if errors: print("ERROR - FOUND THE FOLLOWING DIFFERENCES") print() print() print("\n\n".join(errors)) print() print("Please run python3 -m script.gen_requirements_all") return 1 return 0 for filename, content in files: Path(filename).write_text(content) return 0 if __name__ == "__main__": _VAL = sys.argv[-1] == "validate" sys.exit(main(_VAL))
#!/usr/bin/env python3 """Generate an updated requirements_all.txt.""" import difflib import importlib import os from pathlib import Path import pkgutil import re import sys from homeassistant.util.yaml.loader import load_yaml from script.hassfest.model import Integration COMMENT_REQUIREMENTS = ( "Adafruit_BBIO", "avea", # depends on bluepy "avion", "beacontools", "beewi_smartclim", # depends on bluepy "blinkt", "bluepy", "bme680", "decora", "decora_wifi", "envirophat", "evdev", "face_recognition", "i2csense", "opencv-python-headless", "pybluez", "pycups", "PySwitchbot", "pySwitchmate", "python-eq3bt", "python-gammu", "python-lirc", "pyuserinput", "raspihats", "rpi-rf", "RPi.GPIO", "smbus-cffi", "tensorflow", "tf-models-official", "VL53L1X2", ) IGNORE_PIN = ("colorlog>2.1,<3", "urllib3") URL_PIN = ( "https://developers.home-assistant.io/docs/" "creating_platform_code_review.html#1-requirements" ) CONSTRAINT_PATH = os.path.join( os.path.dirname(__file__), "../homeassistant/package_constraints.txt" ) CONSTRAINT_BASE = """ pycryptodome>=3.6.6 # Constrain urllib3 to ensure we deal with CVE-2019-11236 & CVE-2019-11324 urllib3>=1.24.3 # Constrain H11 to ensure we get a new enough version to support non-rfc line endings h11>=0.12.0 # Constrain httplib2 to protect against GHSA-93xj-8mrv-444m # https://github.com/advisories/GHSA-93xj-8mrv-444m httplib2>=0.19.0 # This is a old unmaintained library and is replaced with pycryptodome pycrypto==1000000000.0.0 # To remove reliance on typing btlewrap>=0.0.10 # This overrides a built-in Python package enum34==1000000000.0.0 typing==1000000000.0.0 uuid==1000000000.0.0 """ IGNORE_PRE_COMMIT_HOOK_ID = ( "check-executables-have-shebangs", "check-json", "no-commit-to-branch", "prettier", "python-typing-update", ) def has_tests(module: str): """Test if a module has tests. Module format: homeassistant.components.hue Test if exists: tests/components/hue """ path = Path(module.replace(".", "/").replace("homeassistant", "tests")) if not path.exists(): return False if not path.is_dir(): return True # Dev environments might have stale directories around # from removed tests. Check for that. content = [f.name for f in path.glob("*")] # Directories need to contain more than `__pycache__` # to exist in Git and so be seen by CI. return content != ["__pycache__"] def explore_module(package, explore_children): """Explore the modules.""" module = importlib.import_module(package) found = [] if not hasattr(module, "__path__"): return found for _, name, _ in pkgutil.iter_modules(module.__path__, f"{package}."): found.append(name) if explore_children: found.extend(explore_module(name, False)) return found def core_requirements(): """Gather core requirements out of setup.py.""" reqs_raw = re.search( r"REQUIRES = \[(.*?)\]", Path("setup.py").read_text(), re.S ).group(1) return [x[1] for x in re.findall(r"(['\"])(.*?)\1", reqs_raw)] def gather_recursive_requirements(domain, seen=None): """Recursively gather requirements from a module.""" if seen is None: seen = set() seen.add(domain) integration = Integration(Path(f"homeassistant/components/{domain}")) integration.load_manifest() reqs = set(integration.requirements) for dep_domain in integration.dependencies: reqs.update(gather_recursive_requirements(dep_domain, seen)) return reqs def comment_requirement(req): """Comment out requirement. 
Some don't install on all systems.""" return any(ign.lower() in req.lower() for ign in COMMENT_REQUIREMENTS) def gather_modules(): """Collect the information.""" reqs = {} errors = [] gather_requirements_from_manifests(errors, reqs) gather_requirements_from_modules(errors, reqs) for key in reqs: reqs[key] = sorted(reqs[key], key=lambda name: (len(name.split(".")), name)) if errors: print("******* ERROR") print("Errors while importing: ", ", ".join(errors)) return None return reqs def gather_requirements_from_manifests(errors, reqs): """Gather all of the requirements from manifests.""" integrations = Integration.load_dir(Path("homeassistant/components")) for domain in sorted(integrations): integration = integrations[domain] if not integration.manifest: errors.append(f"The manifest for integration {domain} is invalid.") continue if integration.disabled: continue process_requirements( errors, integration.requirements, f"homeassistant.components.{domain}", reqs ) def gather_requirements_from_modules(errors, reqs): """Collect the requirements from the modules directly.""" for package in sorted( explore_module("homeassistant.scripts", True) + explore_module("homeassistant.auth", True) ): try: module = importlib.import_module(package) except ImportError as err: print(f"{package.replace('.', '/')}.py: {err}") errors.append(package) continue if getattr(module, "REQUIREMENTS", None): process_requirements(errors, module.REQUIREMENTS, package, reqs) def process_requirements(errors, module_requirements, package, reqs): """Process all of the requirements.""" for req in module_requirements: if "://" in req: errors.append(f"{package}[Only pypi dependencies are allowed: {req}]") if req.partition("==")[1] == "" and req not in IGNORE_PIN: errors.append(f"{package}[Please pin requirement {req}, see {URL_PIN}]") reqs.setdefault(req, []).append(package) def generate_requirements_list(reqs): """Generate a pip file based on requirements.""" output = [] for pkg, requirements in sorted(reqs.items(), key=lambda item: item[0]): for req in sorted(requirements): output.append(f"\n# {req}") if comment_requirement(pkg): output.append(f"\n# {pkg}\n") else: output.append(f"\n{pkg}\n") return "".join(output) def requirements_output(reqs): """Generate output for requirements.""" output = [ "-c homeassistant/package_constraints.txt\n", "\n", "# Home Assistant Core\n", ] output.append("\n".join(core_requirements())) output.append("\n") return "".join(output) def requirements_all_output(reqs): """Generate output for requirements_all.""" output = [ "# Home Assistant Core, full dependency set\n", "-r requirements.txt\n", ] output.append(generate_requirements_list(reqs)) return "".join(output) def requirements_test_all_output(reqs): """Generate output for test_requirements.""" output = [ "# Home Assistant tests, full dependency set\n", f"# Automatically generated by {Path(__file__).name}, do not edit\n", "\n", "-r requirements_test.txt\n", ] filtered = { requirement: modules for requirement, modules in reqs.items() if any( # Always install requirements that are not part of integrations not mdl.startswith("homeassistant.components.") or # Install tests for integrations that have tests has_tests(mdl) for mdl in modules ) } output.append(generate_requirements_list(filtered)) return "".join(output) def requirements_pre_commit_output(): """Generate output for pre-commit dependencies.""" source = ".pre-commit-config.yaml" pre_commit_conf = load_yaml(source) reqs = [] for repo in (x for x in pre_commit_conf["repos"] if x.get("rev")): for 
hook in repo["hooks"]: if hook["id"] not in IGNORE_PRE_COMMIT_HOOK_ID: reqs.append(f"{hook['id']}=={repo['rev'].lstrip('v')}") reqs.extend(x for x in hook.get("additional_dependencies", ())) output = [ f"# Automatically generated " f"from {source} by {Path(__file__).name}, do not edit", "", ] output.extend(sorted(reqs)) return "\n".join(output) + "\n" def gather_constraints(): """Construct output for constraint file.""" return ( "\n".join( sorted( { *core_requirements(), *gather_recursive_requirements("default_config"), *gather_recursive_requirements("mqtt"), } ) + [""] ) + CONSTRAINT_BASE ) def diff_file(filename, content): """Diff a file.""" return list( difflib.context_diff( [f"{line}\n" for line in Path(filename).read_text().split("\n")], [f"{line}\n" for line in content.split("\n")], filename, "generated", ) ) def main(validate): """Run the script.""" if not os.path.isfile("requirements_all.txt"): print("Run this from HA root dir") return 1 data = gather_modules() if data is None: return 1 reqs_file = requirements_output(data) reqs_all_file = requirements_all_output(data) reqs_test_all_file = requirements_test_all_output(data) reqs_pre_commit_file = requirements_pre_commit_output() constraints = gather_constraints() files = ( ("requirements.txt", reqs_file), ("requirements_all.txt", reqs_all_file), ("requirements_test_pre_commit.txt", reqs_pre_commit_file), ("requirements_test_all.txt", reqs_test_all_file), ("homeassistant/package_constraints.txt", constraints), ) if validate: errors = [] for filename, content in files: diff = diff_file(filename, content) if diff: errors.append("".join(diff)) if errors: print("ERROR - FOUND THE FOLLOWING DIFFERENCES") print() print() print("\n\n".join(errors)) print() print("Please run python3 -m script.gen_requirements_all") return 1 return 0 for filename, content in files: Path(filename).write_text(content) return 0 if __name__ == "__main__": _VAL = sys.argv[-1] == "validate" sys.exit(main(_VAL))
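# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the script above): the pin check in
# process_requirements uses str.partition("=="), whose middle element is the
# separator itself when found and '' otherwise, so an empty separator means
# the requirement carries no exact pin and triggers the URL_PIN error.
for req in ("pyserial==3.5", "colorlog>2.1,<3", "requests"):
    pinned = req.partition("==")[1] == "=="
    print(f"{req!r}: pinned={pinned}")
# prints pinned=True for 'pyserial==3.5' and pinned=False for the other two
# ---------------------------------------------------------------------------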
import clinica.pipelines.engine as cpe class SpatialSVM(cpe.Pipeline): """SpatialSVM - Prepare input data for SVM with spatial and anatomical regularization. Returns: A clinica pipeline object containing the SpatialSVM pipeline. """ def check_pipeline_parameters(self): """Check pipeline parameters.""" from clinica.utils.group import check_group_label # Clinica compulsory parameters self.parameters.setdefault("group_label", None) check_group_label(self.parameters["group_label"]) if "orig_input_data" not in self.parameters.keys(): raise KeyError( "Missing compulsory orig_input_data key in pipeline parameter." ) # Optional parameters for inputs from pet-volume pipeline self.parameters.setdefault("acq_label", None) self.parameters.setdefault("suvr_reference_region", None) self.parameters.setdefault("use_pvc_data", False) # Advanced parameters self.parameters.setdefault("fwhm", 4) def check_custom_dependencies(self): """Check dependencies that can not be listed in the `info.json` file.""" def get_input_fields(self): """Specify the list of possible inputs of this pipeline. Returns: A list of (string) input fields name. """ return ["dartel_input", "input_image"] def get_output_fields(self): """Specify the list of possible outputs of this pipeline. Returns: A list of (string) output fields name. """ return ["regularized_image"] def build_input_node(self): """Build and connect an input node to the pipeline.""" import os import nipype.interfaces.utility as nutil import nipype.pipeline.engine as npe from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException from clinica.utils.input_files import ( pet_volume_normalized_suvr_pet, t1_volume_final_group_template, ) from clinica.utils.inputs import clinica_file_reader, clinica_group_reader from clinica.utils.ux import print_groups_in_caps_directory # Check that group already exists if not os.path.exists( os.path.join( self.caps_directory, "groups", "group-" + self.parameters["group_label"] ) ): print_groups_in_caps_directory(self.caps_directory) raise ClinicaException( f"Group {self.parameters["group_label"]} does not exist. " "Did you run pet-volume, t1-volume or t1-volume-create-dartel pipeline?" ) read_parameters_node = npe.Node( name="LoadingCLIArguments", interface=nutil.IdentityInterface( fields=self.get_input_fields(), mandatory_inputs=True ), ) all_errors = [] if self.parameters["orig_input_data"] == "t1-volume": caps_files_information = { "pattern": os.path.join( "t1", "spm", "dartel", "group-" + self.parameters["group_label"], "*_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability.nii.gz", ), "description": "graymatter tissue segmented in T1w MRI in Ixi549 space", "needed_pipeline": "t1-volume-tissue-segmentation", } elif self.parameters["orig_input_data"] == "pet-volume": if not ( self.parameters["acq_label"] and self.parameters["suvr_reference_region"] ): raise ValueError( f"Missing value(s) in parameters from pet-volume pipeline. Given values:\n" f"- acq_label: {self.parameters["acq_label"]}\n" f"- suvr_reference_region: {self.parameters["suvr_reference_region"]}\n" f"- use_pvc_data: {self.parameters["use_pvc_data"]}\n" ) caps_files_information = pet_volume_normalized_suvr_pet( acq_label=self.parameters["acq_label"], suvr_reference_region=self.parameters["suvr_reference_region"], use_brainmasked_image=False, use_pvc_data=self.parameters["use_pvc_data"], fwhm=0, ) else: raise ValueError( f"Image type {self.parameters["orig_input_data"]} unknown." 
) try: input_image, _ = clinica_file_reader( self.subjects, self.sessions, self.caps_directory, caps_files_information, ) except ClinicaException as e: all_errors.append(e) try: dartel_input = clinica_group_reader( self.caps_directory, t1_volume_final_group_template(self.parameters["group_label"]), ) except ClinicaException as e: all_errors.append(e) # Raise all errors if some happened if len(all_errors) > 0: error_message = "Clinica faced errors while trying to read files in your CAPS directories.\n" for msg in all_errors: error_message += str(msg) raise ClinicaCAPSError(error_message) read_parameters_node.inputs.dartel_input = dartel_input read_parameters_node.inputs.input_image = input_image # fmt: off self.connect( [ (read_parameters_node, self.input_node, [("dartel_input", "dartel_input")]), (read_parameters_node, self.input_node, [("input_image", "input_image")]), ] ) # fmt: on def build_output_node(self): """Build and connect an output node to the pipeline.""" def build_core_nodes(self): """Build and connect the core nodes of the pipeline.""" import nipype.interfaces.io as nio import nipype.interfaces.utility as nutil import nipype.pipeline.engine as npe import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils fisher_tensor_generation = npe.Node( name="obtain_g_fisher_tensor", interface=nutil.Function( input_names=["dartel_input", "FWHM"], output_names=["fisher_tensor", "fisher_tensor_path"], function=utils.obtain_g_fisher_tensor, ), ) fisher_tensor_generation.inputs.FWHM = self.parameters["fwhm"] time_step_generation = npe.Node( name="estimation_time_step", interface=nutil.Function( input_names=["dartel_input", "FWHM", "g"], output_names=["t_step", "json_file"], function=utils.obtain_time_step_estimation, ), ) time_step_generation.inputs.FWHM = self.parameters["fwhm"] heat_solver_equation = npe.MapNode( name="heat_solver_equation", interface=nutil.Function( input_names=["input_image", "g", "FWHM", "t_step", "dartel_input"], output_names=["regularized_image"], function=utils.heat_solver_equation, ), iterfield=["input_image"], ) heat_solver_equation.inputs.FWHM = self.parameters["fwhm"] datasink = npe.Node(nio.DataSink(), name="sinker") datasink.inputs.base_directory = self.caps_directory datasink.inputs.parameterization = True if self.parameters["orig_input_data"] == "t1-volume": datasink.inputs.regexp_substitutions = [ ( r"(.*)/regularized_image/.*/(.*(sub-(.*)_ses-(.*))_T1w(.*)_probability(.*))$", r"\1/subjects/sub-\4/ses-\5/machine_learning/input_spatial_svm/group-" + self.parameters["group_label"] + r"/\3_T1w\6_spatialregularization\7", ), ( r"(.*)json_file/(output_data.json)$", r"\1/groups/group-" + self.parameters["group_label"] + r"/machine_learning/input_spatial_svm/group-" + self.parameters["group_label"] + r"_space-Ixi549Space_parameters.json", ), ( r"(.*)fisher_tensor_path/(output_fisher_tensor.npy)$", r"\1/groups/group-" + self.parameters["group_label"] + r"/machine_learning/input_spatial_svm/group-" + self.parameters["group_label"] + r"_space-Ixi549Space_gram.npy", ), ] elif self.parameters["orig_input_data"] == "pet-volume": datasink.inputs.regexp_substitutions = [ ( r"(.*)/regularized_image/.*/(.*(sub-(.*)_ses-(.*))_(task.*)_pet(.*))$", r"\1/subjects/sub-\4/ses-\5/machine_learning/input_spatial_svm/group-" + self.parameters["group_label"] + r"/\3_\6_spatialregularization\7", ), ( r"(.*)json_file/(output_data.json)$", r"\1/groups/group-" + self.parameters["group_label"] + r"/machine_learning/input_spatial_svm/group-" + 
self.parameters["group_label"] + r"_space-Ixi549Space_parameters.json", ), ( r"(.*)fisher_tensor_path/(output_fisher_tensor.npy)$", r"\1/groups/group-" + self.parameters["group_label"] + r"/machine_learning/input_spatial_svm/group-" + self.parameters["group_label"] + r"_space-Ixi549Space_gram.npy", ), ] # Connection # ========== # fmt: off self.connect( [ (self.input_node, fisher_tensor_generation, [("dartel_input", "dartel_input")]), (fisher_tensor_generation, time_step_generation, [("fisher_tensor", "g")]), (self.input_node, time_step_generation, [("dartel_input", "dartel_input")]), (self.input_node, heat_solver_equation, [("input_image", "input_image")]), (fisher_tensor_generation, heat_solver_equation, [("fisher_tensor", "g")]), (time_step_generation, heat_solver_equation, [("t_step", "t_step")]), (self.input_node, heat_solver_equation, [("dartel_input", "dartel_input")]), (fisher_tensor_generation, datasink, [("fisher_tensor_path", "fisher_tensor_path")]), (time_step_generation, datasink, [("json_file", "json_file")]), (heat_solver_equation, datasink, [("regularized_image", "regularized_image")]), ] ) # fmt: on
import clinica.pipelines.engine as cpe class SpatialSVM(cpe.Pipeline): """SpatialSVM - Prepare input data for SVM with spatial and anatomical regularization. Returns: A clinica pipeline object containing the SpatialSVM pipeline. """ def check_pipeline_parameters(self): """Check pipeline parameters.""" from clinica.utils.group import check_group_label # Clinica compulsory parameters self.parameters.setdefault("group_label", None) check_group_label(self.parameters["group_label"]) if "orig_input_data" not in self.parameters.keys(): raise KeyError( "Missing compulsory orig_input_data key in pipeline parameter." ) # Optional parameters for inputs from pet-volume pipeline self.parameters.setdefault("acq_label", None) self.parameters.setdefault("suvr_reference_region", None) self.parameters.setdefault("use_pvc_data", False) # Advanced parameters self.parameters.setdefault("fwhm", 4) def check_custom_dependencies(self): """Check dependencies that can not be listed in the `info.json` file.""" def get_input_fields(self): """Specify the list of possible inputs of this pipeline. Returns: A list of (string) input fields name. """ return ["dartel_input", "input_image"] def get_output_fields(self): """Specify the list of possible outputs of this pipeline. Returns: A list of (string) output fields name. """ return ["regularized_image"] def build_input_node(self): """Build and connect an input node to the pipeline.""" import os import nipype.interfaces.utility as nutil import nipype.pipeline.engine as npe from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException from clinica.utils.input_files import ( pet_volume_normalized_suvr_pet, t1_volume_final_group_template, ) from clinica.utils.inputs import clinica_file_reader, clinica_group_reader from clinica.utils.ux import print_groups_in_caps_directory # Check that group already exists if not os.path.exists( os.path.join( self.caps_directory, "groups", "group-" + self.parameters["group_label"] ) ): print_groups_in_caps_directory(self.caps_directory) raise ClinicaException( f"Group {self.parameters['group_label']} does not exist. " "Did you run pet-volume, t1-volume or t1-volume-create-dartel pipeline?" ) read_parameters_node = npe.Node( name="LoadingCLIArguments", interface=nutil.IdentityInterface( fields=self.get_input_fields(), mandatory_inputs=True ), ) all_errors = [] if self.parameters["orig_input_data"] == "t1-volume": caps_files_information = { "pattern": os.path.join( "t1", "spm", "dartel", "group-" + self.parameters["group_label"], "*_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability.nii.gz", ), "description": "graymatter tissue segmented in T1w MRI in Ixi549 space", "needed_pipeline": "t1-volume-tissue-segmentation", } elif self.parameters["orig_input_data"] == "pet-volume": if not ( self.parameters["acq_label"] and self.parameters["suvr_reference_region"] ): raise ValueError( f"Missing value(s) in parameters from pet-volume pipeline. Given values:\n" f"- acq_label: {self.parameters['acq_label']}\n" f"- suvr_reference_region: {self.parameters['suvr_reference_region']}\n" f"- use_pvc_data: {self.parameters['use_pvc_data']}\n" ) caps_files_information = pet_volume_normalized_suvr_pet( acq_label=self.parameters["acq_label"], suvr_reference_region=self.parameters["suvr_reference_region"], use_brainmasked_image=False, use_pvc_data=self.parameters["use_pvc_data"], fwhm=0, ) else: raise ValueError( f"Image type {self.parameters['orig_input_data']} unknown." 
) try: input_image, _ = clinica_file_reader( self.subjects, self.sessions, self.caps_directory, caps_files_information, ) except ClinicaException as e: all_errors.append(e) try: dartel_input = clinica_group_reader( self.caps_directory, t1_volume_final_group_template(self.parameters["group_label"]), ) except ClinicaException as e: all_errors.append(e) # Raise all errors if some happened if len(all_errors) > 0: error_message = "Clinica faced errors while trying to read files in your CAPS directories.\n" for msg in all_errors: error_message += str(msg) raise ClinicaCAPSError(error_message) read_parameters_node.inputs.dartel_input = dartel_input read_parameters_node.inputs.input_image = input_image # fmt: off self.connect( [ (read_parameters_node, self.input_node, [("dartel_input", "dartel_input")]), (read_parameters_node, self.input_node, [("input_image", "input_image")]), ] ) # fmt: on def build_output_node(self): """Build and connect an output node to the pipeline.""" def build_core_nodes(self): """Build and connect the core nodes of the pipeline.""" import nipype.interfaces.io as nio import nipype.interfaces.utility as nutil import nipype.pipeline.engine as npe import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils fisher_tensor_generation = npe.Node( name="obtain_g_fisher_tensor", interface=nutil.Function( input_names=["dartel_input", "FWHM"], output_names=["fisher_tensor", "fisher_tensor_path"], function=utils.obtain_g_fisher_tensor, ), ) fisher_tensor_generation.inputs.FWHM = self.parameters["fwhm"] time_step_generation = npe.Node( name="estimation_time_step", interface=nutil.Function( input_names=["dartel_input", "FWHM", "g"], output_names=["t_step", "json_file"], function=utils.obtain_time_step_estimation, ), ) time_step_generation.inputs.FWHM = self.parameters["fwhm"] heat_solver_equation = npe.MapNode( name="heat_solver_equation", interface=nutil.Function( input_names=["input_image", "g", "FWHM", "t_step", "dartel_input"], output_names=["regularized_image"], function=utils.heat_solver_equation, ), iterfield=["input_image"], ) heat_solver_equation.inputs.FWHM = self.parameters["fwhm"] datasink = npe.Node(nio.DataSink(), name="sinker") datasink.inputs.base_directory = self.caps_directory datasink.inputs.parameterization = True if self.parameters["orig_input_data"] == "t1-volume": datasink.inputs.regexp_substitutions = [ ( r"(.*)/regularized_image/.*/(.*(sub-(.*)_ses-(.*))_T1w(.*)_probability(.*))$", r"\1/subjects/sub-\4/ses-\5/machine_learning/input_spatial_svm/group-" + self.parameters["group_label"] + r"/\3_T1w\6_spatialregularization\7", ), ( r"(.*)json_file/(output_data.json)$", r"\1/groups/group-" + self.parameters["group_label"] + r"/machine_learning/input_spatial_svm/group-" + self.parameters["group_label"] + r"_space-Ixi549Space_parameters.json", ), ( r"(.*)fisher_tensor_path/(output_fisher_tensor.npy)$", r"\1/groups/group-" + self.parameters["group_label"] + r"/machine_learning/input_spatial_svm/group-" + self.parameters["group_label"] + r"_space-Ixi549Space_gram.npy", ), ] elif self.parameters["orig_input_data"] == "pet-volume": datasink.inputs.regexp_substitutions = [ ( r"(.*)/regularized_image/.*/(.*(sub-(.*)_ses-(.*))_(task.*)_pet(.*))$", r"\1/subjects/sub-\4/ses-\5/machine_learning/input_spatial_svm/group-" + self.parameters["group_label"] + r"/\3_\6_spatialregularization\7", ), ( r"(.*)json_file/(output_data.json)$", r"\1/groups/group-" + self.parameters["group_label"] + r"/machine_learning/input_spatial_svm/group-" + 
self.parameters["group_label"] + r"_space-Ixi549Space_parameters.json", ), ( r"(.*)fisher_tensor_path/(output_fisher_tensor.npy)$", r"\1/groups/group-" + self.parameters["group_label"] + r"/machine_learning/input_spatial_svm/group-" + self.parameters["group_label"] + r"_space-Ixi549Space_gram.npy", ), ] # Connection # ========== # fmt: off self.connect( [ (self.input_node, fisher_tensor_generation, [("dartel_input", "dartel_input")]), (fisher_tensor_generation, time_step_generation, [("fisher_tensor", "g")]), (self.input_node, time_step_generation, [("dartel_input", "dartel_input")]), (self.input_node, heat_solver_equation, [("input_image", "input_image")]), (fisher_tensor_generation, heat_solver_equation, [("fisher_tensor", "g")]), (time_step_generation, heat_solver_equation, [("t_step", "t_step")]), (self.input_node, heat_solver_equation, [("dartel_input", "dartel_input")]), (fisher_tensor_generation, datasink, [("fisher_tensor_path", "fisher_tensor_path")]), (time_step_generation, datasink, [("json_file", "json_file")]), (heat_solver_equation, datasink, [("regularized_image", "regularized_image")]), ] ) # fmt: on
import re import shlex import asyncio import warnings from datetime import datetime from functools import partial, update_wrapper from typing import (Tuple, Union, Callable, Iterable, Any, Optional, List, Dict, Awaitable, Pattern, Type) from aiocqhttp import Event as CQEvent from aiocqhttp.message import Message from nonebot import NoneBot, permission as perm from nonebot.command.argfilter import ValidateError from nonebot.helpers import context_id, send, render_expression from nonebot.log import logger from nonebot.session import BaseSession from nonebot.typing import (CommandName_T, CommandArgs_T, CommandHandler_T, Message_T, State_T, Filter_T, Patterns_T) # key: context id # value: CommandSession object _sessions = {} # type: Dict[str, "CommandSession"] class Command: __slots__ = ('name', 'func', 'permission', 'only_to_me', 'privileged', 'args_parser_func', 'session_impl') def __init__(self, *, name: CommandName_T, func: CommandHandler_T, permission: int, only_to_me: bool, privileged: bool, session_implement: Optional[Type['CommandSession']]): self.name = name self.func = func self.permission = permission self.only_to_me = only_to_me self.privileged = privileged self.args_parser_func: Optional[CommandHandler_T] = None self.session_impl = session_implement async def run(self, session: 'CommandSession', *, check_perm: bool = True, dry: bool = False) -> bool: """ Run the command in a given session. :param session: CommandSession object :param check_perm: should check permission before running :param dry: just check any prerequisite, without actually running :return: the command is finished (or can be run, given dry == True) """ has_perm = await self._check_perm(session) if check_perm else True if self.func and has_perm: if dry: return True if session.current_arg_filters is not None and \ session.current_key is not None: # argument-level filters are given, use them arg = session.current_arg config = session.bot.config for f in session.current_arg_filters: try: res = f(arg) if isinstance(res, Awaitable): res = await res arg = res except ValidateError as e: # validation failed if config.MAX_VALIDATION_FAILURES > 0: # should check number of validation failures session.state['__validation_failure_num'] = \ session.state.get( '__validation_failure_num', 0) + 1 if session.state['__validation_failure_num'] >= \ config.MAX_VALIDATION_FAILURES: # noinspection PyProtectedMember session.finish( render_expression( config. TOO_MANY_VALIDATION_FAILURES_EXPRESSION ), **session._current_send_kwargs) failure_message = e.message if failure_message is None: failure_message = render_expression( config.DEFAULT_VALIDATION_FAILURE_EXPRESSION) # noinspection PyProtectedMember session.pause(failure_message, **session._current_send_kwargs) # passed all filters session.state[session.current_key] = arg else: # fallback to command-level args_parser_func if self.args_parser_func: await self.args_parser_func(session) if session.current_key is not None and \ session.current_key not in session.state: # args_parser_func didn't set state, here we set it session.state[session.current_key] = session.current_arg await self.func(session) return True return False async def _check_perm(self, session) -> bool: """ Check if the session has sufficient permission to call the command. 
:param session: CommandSession object :return: the session has the permission """ return await perm.check_permission(session.bot, session.event, self.permission) def args_parser(self, parser_func: CommandHandler_T) -> CommandHandler_T: """ Decorator to register a function as the arguments parser of the corresponding command. """ self.args_parser_func = parser_func return parser_func def __repr__(self): return f'<Command, name={self.name.__repr__()}>' def __str__(self): return self.__repr__() class CommandManager: """Global Command Manager""" _commands = {} # type: Dict[CommandName_T, Command] _aliases = {} # type: Dict[str, Command] _switches = {} # type: Dict[Command, bool] _patterns = {} # type: Dict[Pattern, Command] def __init__(self): self.commands = CommandManager._commands.copy() self.aliases = CommandManager._aliases.copy() self.switches = CommandManager._switches.copy() self.patterns = CommandManager._patterns.copy() @classmethod def add_command(cls, cmd_name: CommandName_T, cmd: Command) -> None: """Register a command Args: cmd_name (CommandName_T): Command name cmd (Command): Command object """ if cmd_name in cls._commands: warnings.warn(f"Command {cmd_name} already exists") return cls._switches[cmd] = True cls._commands[cmd_name] = cmd @classmethod def reload_command(cls, cmd_name: CommandName_T, cmd: Command) -> None: """Reload a command **Warning! Dangerous function** Args: cmd_name (CommandName_T): Command name cmd (Command): Command object """ if cmd_name not in cls._commands: warnings.warn( f"Command {cmd_name} does not exist. Please use add_command instead" ) return cmd_ = cls._commands[cmd_name] if cmd_ in cls._switches: del cls._switches[cmd_] cls._switches[cmd] = True cls._commands[cmd_name] = cmd @classmethod def remove_command(cls, cmd_name: CommandName_T) -> bool: """Remove a command **Warning! Dangerous function** Args: cmd_name (CommandName_T): Command name to remove Returns: bool: Success or not """ if cmd_name in cls._commands: cmd = cls._commands[cmd_name] for alias in list( filter(lambda x: cls._aliases[x] == cmd, cls._aliases.keys())): del cls._aliases[alias] del cls._commands[cmd_name] if cmd in cls._switches: del cls._switches[cmd] return True return False @classmethod def switch_command_global(cls, cmd_name: CommandName_T, state: Optional[bool] = None): """Change command state globally or simply switch it if `state` is None Args: cmd_name (CommandName_T): Command name state (Optional[bool]): State to change to. Defaults to None. """ cmd = cls._commands[cmd_name] cls._switches[cmd] = not cls._switches[cmd] if state is None else bool( state) @classmethod def add_aliases(cls, aliases: Union[Iterable[str], str], cmd: Command): """Register command alias(es) Args: aliases (Union[Iterable[str], str]): Command aliases cmd_name (Command): Command """ if isinstance(aliases, str): aliases = (aliases,) for alias in aliases: if not isinstance(alias, str): warnings.warn(f"Alias {alias} is not a string! 
Ignored") return elif alias in cls._aliases: warnings.warn(f"Alias {alias} already exists") return cls._aliases[alias] = cmd @classmethod def add_patterns(cls, patterns: Patterns_T, cmd: Command): """Register command alias(es) Args: patterns (Union[Iterable[Pattern], Pattern, Iterable[str], str]): Command patterns cmd (Command): Matched command """ if isinstance(patterns, (str, Pattern)): patterns = (patterns,) for pattern in patterns: if isinstance(pattern, str): pattern = re.compile(pattern) if not isinstance(pattern, Pattern): warnings.warn( f"Pattern {pattern} is not a regex or string! Ignored") continue elif pattern in cls._patterns: warnings.warn(f"Pattern {pattern} already exists") continue cls._patterns[pattern] = cmd def _add_command_to_tree(self, cmd_name: CommandName_T, cmd: Command, tree: Dict[str, Union[Dict, Command]]) -> None: """Add command to the target command tree. Args: cmd_name (CommandName_T): Name of the command cmd (Command): Command object tree (Dict[str, Union[Dict, Command]): Target command tree """ current_parent = tree for parent_key in cmd_name[:-1]: current_parent[parent_key] = current_parent.get(parent_key) or {} current_parent = current_parent[parent_key] if not isinstance(current_parent, dict): warnings.warn(f"{current_parent} is not a registry dict") return if cmd_name[-1] in current_parent: warnings.warn(f"There is already a command named {cmd_name}") return current_parent[cmd_name[-1]] = cmd def _generate_command_tree( self, commands: Dict[CommandName_T, Command]) -> Dict[str, Union[Dict, Command]]: """Generate command tree from commands dictionary. Args: commands (Dict[CommandName_T, Command]): Dictionary of commands Returns: Dict[str, Union[Dict, "Command"]]: Command tree """ cmd_tree = {} #type: Dict[str, Union[Dict, "Command"]] for cmd_name, cmd in commands.items(): self._add_command_to_tree(cmd_name, cmd, cmd_tree) return cmd_tree def _find_command(self, name: Union[str, CommandName_T]) -> Optional[Command]: cmd_name = (name,) if isinstance(name, str) else name if not cmd_name: return None # cmd_tree = self._generate_command_tree({ # name: cmd # for name, cmd in self.commands.items() # if self.switches.get(cmd, True) # }) # for part in cmd_name[:-1]: # if part not in cmd_tree or not isinstance( # cmd_tree[part], #type: ignore # dict): # return None # cmd_tree = cmd_tree[part] # type: ignore # cmd = cmd_tree.get(cmd_name[-1]) # type: ignore # return cmd if isinstance(cmd, Command) else None cmd = { name: cmd for name, cmd in self.commands.items() if self.switches.get(cmd, True) }.get(cmd_name) return cmd def parse_command( self, bot: NoneBot, event: CQEvent) -> Tuple[Optional[Command], Optional[str]]: cmd_string = str(event.message).lstrip() logger.debug(f'Parsing command: {repr(cmd_string)}') matched_start = None for start in bot.config.COMMAND_START: # loop through COMMAND_START to find the longest matched start curr_matched_start = None if isinstance(start, type(re.compile(''))): m = start.search(cmd_string) if m and m.start(0) == 0: curr_matched_start = m.group(0) elif isinstance(start, str): if cmd_string.startswith(start): curr_matched_start = start if curr_matched_start is not None and \ (matched_start is None or len(curr_matched_start) > len(matched_start)): # a longer start, use it matched_start = curr_matched_start if matched_start is None: # it's not a command logger.debug('It\'s not a command') return None, None logger.debug(f'Matched command start: ' f'{matched_start}{'(empty)' if not matched_start else ''}') full_command = 
cmd_string[len(matched_start):].lstrip() if not full_command: # command is empty return None, None cmd_name_text, *cmd_remained = full_command.split(maxsplit=1) cmd_name = None for sep in bot.config.COMMAND_SEP: # loop through COMMAND_SEP to find the most optimized split curr_cmd_name = None if isinstance(sep, type(re.compile(''))): curr_cmd_name = tuple(sep.split(cmd_name_text)) elif isinstance(sep, str): curr_cmd_name = tuple(cmd_name_text.split(sep)) if curr_cmd_name is not None and \ (not cmd_name or len(curr_cmd_name) > len(cmd_name)): # a more optimized split, use it cmd_name = curr_cmd_name if not cmd_name: cmd_name = (cmd_name_text,) logger.debug(f'Split command name: {cmd_name}') cmd = self._find_command(cmd_name) # type: ignore if not cmd: logger.debug(f'Command {cmd_name} not found. Try to match aliases') cmd = { name: cmd for name, cmd in self.aliases.items() if self.switches.get(cmd, True) }.get(cmd_name_text) if not cmd: logger.debug(f'Alias {cmd_name} not found. Try to match patterns') patterns = { pattern: cmd for pattern, cmd in self.patterns.items() if self.switches.get(cmd, True) } for pattern in patterns: if pattern.search(full_command): cmd = patterns[pattern] logger.debug( f'Pattern {pattern} of command {cmd.name} matched, function: {cmd.func}' ) # if command matched by regex, it will use the full_command as the current_arg of the session return cmd, full_command if not cmd: return None, None logger.debug(f'Command {cmd.name} found, function: {cmd.func}') return cmd, ''.join(cmd_remained) def switch_command(self, cmd_name: CommandName_T, state: Optional[bool] = None): """Change command state or simply switch it if `state` is None Args: cmd_name (CommandName_T): Command name state (Optional[bool]): State to change to. Defaults to None. """ cmd = self.commands[cmd_name] self.switches[cmd] = not self.switches[cmd] if state is None else bool( state) class CommandInterrupt(Exception): pass class _PauseException(CommandInterrupt): """ Raised by session.pause() indicating that the command session should be paused to ask the user for some arguments. """ pass class _FinishException(CommandInterrupt): """ Raised by session.finish() indicating that the command session should be stopped and removed. """ def __init__(self, result: bool = True): """ :param result: succeeded to call the command """ self.result = result class SwitchException(CommandInterrupt): """ Raised by session.switch() indicating that the command session should be stopped and replaced with a new one (going through handle_message() again). Since the new message will go through handle_message() again, the later function should be notified. So this exception is intended to be propagated to handle_message(). 
""" def __init__(self, new_message: Message): """ :param new_message: new message which should be placed in event """ self.new_message = new_message class CommandSession(BaseSession): __slots__ = ('cmd', 'current_key', 'current_arg_filters', '_current_send_kwargs', 'current_arg', '_current_arg_text', '_current_arg_images', '_state', '_last_interaction', '_running', '_run_future') def __init__(self, bot: NoneBot, event: CQEvent, cmd: Command, *, current_arg: Optional[str] = '', args: Optional[CommandArgs_T] = None): super().__init__(bot, event) self.cmd = cmd # Command object # unique key of the argument that is currently requesting (asking) self.current_key: Optional[str] = None # initialize current argument filters self.current_arg_filters: Optional[List[Filter_T]] = None self._current_send_kwargs: Dict[str, Any] = {} # initialize current argument self.current_arg: Optional[str] = '' # with potential CQ codes self._current_arg_text = None self._current_arg_images = None self.refresh(event, current_arg=current_arg) # fill the above self._run_future = partial(asyncio.run_coroutine_threadsafe, loop=bot.loop) self._state: State_T = {} if args: self._state.update(args) self._last_interaction = None # last interaction time of this session self._running = False @property def state(self) -> State_T: """ State of the session. This contains all named arguments and other session scope temporary values. """ return self._state @property def args(self) -> CommandArgs_T: """Deprecated. Use `session.state` instead.""" return self.state @property def running(self) -> bool: return self._running @running.setter def running(self, value) -> None: if self._running is True and value is False: # change status from running to not running, record the time self._last_interaction = datetime.now() self._running = value @property def is_valid(self) -> bool: """Check if the session is expired or not.""" if self.bot.config.SESSION_EXPIRE_TIMEOUT and \ self._last_interaction and \ datetime.now() - self._last_interaction > \ self.bot.config.SESSION_EXPIRE_TIMEOUT: return False return True @property def is_first_run(self) -> bool: return self._last_interaction is None @property def current_arg_text(self) -> str: """ Plain text part in the current argument, without any CQ codes. """ if self._current_arg_text is None: self._current_arg_text = Message( self.current_arg).extract_plain_text() return self._current_arg_text @property def current_arg_images(self) -> List[str]: """ Images (as list of urls) in the current argument. """ if self._current_arg_images is None: self._current_arg_images = [ s.data['url'] for s in Message(self.current_arg) if s.type == 'image' and 'url' in s.data ] return self._current_arg_images @property def argv(self) -> List[str]: """ Shell-like argument list, similar to sys.argv. Only available while shell_like is True in on_command decorator. """ return self.state.get('argv', []) def refresh(self, event: CQEvent, *, current_arg: Optional[str] = '') -> None: """ Refill the session with a new message event. :param event: new message event :param current_arg: new command argument as a string """ self.event = event self.current_arg = current_arg self._current_arg_text = None self._current_arg_images = None def get(self, key: str, *, prompt: Optional[Message_T] = None, arg_filters: Optional[List[Filter_T]] = None, **kwargs) -> Any: """ Get an argument with a given key. 
If the argument does not exist in the current session, a pause exception will be raised, and the caller of the command will know it should keep the session for further interaction with the user. :param key: argument key :param prompt: prompt to ask the user :param arg_filters: argument filters for the next user input :return: the argument value """ if key in self.state: return self.state[key] self.current_key = key self.current_arg_filters = arg_filters self._current_send_kwargs = kwargs self.pause(prompt, **kwargs) def get_optional(self, key: str, default: Optional[Any] = None) -> Optional[Any]: """ Simply get a argument with given key. Deprecated. Use `session.state.get()` instead. """ return self.state.get(key, default) def pause(self, message: Optional[Message_T] = None, **kwargs) -> None: """Pause the session for further interaction.""" if message: self._run_future(self.send(message, **kwargs)) raise _PauseException def finish(self, message: Optional[Message_T] = None, **kwargs) -> None: """Finish the session.""" if message: self._run_future(self.send(message, **kwargs)) raise _FinishException def switch(self, new_message: Message_T) -> None: """ Finish the session and switch to a new (fake) message event. The user may send another command (or another intention as natural language) when interacting with the current session. In this case, the session may not understand what the user is saying, so it should call this method and pass in that message, then NoneBot will handle the situation properly. """ if self.is_first_run: # if calling this method during first run, # we think the command is not handled raise _FinishException(result=False) if not isinstance(new_message, Message): new_message = Message(new_message) raise SwitchException(new_message) async def handle_command(bot: NoneBot, event: CQEvent, manager: CommandManager) -> Optional[bool]: """ Handle a message as a command. This function is typically called by "handle_message". 
:param bot: NoneBot instance :param event: message event :param manager: command manager :return: the message is handled as a command """ cmd, current_arg = manager.parse_command(bot, event) is_privileged_cmd = cmd and cmd.privileged if is_privileged_cmd and cmd.only_to_me and not event['to_me']: is_privileged_cmd = False disable_interaction = bool(is_privileged_cmd) if is_privileged_cmd: logger.debug(f'Command {cmd.name} is a privileged command') ctx_id = context_id(event) if not is_privileged_cmd: # wait for 1.5 seconds (at most) if the current session is running retry = 5 while retry > 0 and \ _sessions.get(ctx_id) and _sessions[ctx_id].running: retry -= 1 await asyncio.sleep(0.3) check_perm = True session: Optional[CommandSession] = _sessions.get( ctx_id) if not is_privileged_cmd else None if session is not None: if session.running: logger.warning(f'There is a session of command ' f'{session.cmd.name} running, notify the user') asyncio.ensure_future( send(bot, event, render_expression(bot.config.SESSION_RUNNING_EXPRESSION))) # pretend we are successful, so that NLP won't handle it return True if session.is_valid: logger.debug(f'Session of command {session.cmd.name} exists') # since it's in a session, the user must be talking to me event['to_me'] = True session.refresh(event, current_arg=str(event['message'])) # there is no need to check permission for existing session check_perm = False else: # the session is expired, remove it logger.debug(f'Session of command {session.cmd.name} is expired') if ctx_id in _sessions: del _sessions[ctx_id] session = None if session is None: if not cmd: logger.debug('Not a known command, ignored') return False if cmd.only_to_me and not event['to_me']: logger.debug('Not to me, ignored') return False SessionImpl = cmd.session_impl or CommandSession session = SessionImpl(bot, event, cmd, current_arg=current_arg) logger.debug(f'New session of command {session.cmd.name} created') assert isinstance(session, CommandSession) return await _real_run_command(session, ctx_id, check_perm=check_perm, disable_interaction=disable_interaction) async def call_command(bot: NoneBot, event: CQEvent, name: Union[str, CommandName_T], *, current_arg: str = '', args: Optional[CommandArgs_T] = None, check_perm: bool = True, disable_interaction: bool = False) -> Optional[bool]: """ Call a command internally. This function is typically called by some other commands or "handle_natural_language" when handling NLPResult object. Note: If disable_interaction is not True, after calling this function, any previous command session will be overridden, even if the command being called here does not need further interaction (a.k.a asking the user for more info). 
:param bot: NoneBot instance :param event: message event :param name: command name :param current_arg: command current argument string :param args: command args :param check_perm: should check permission before running command :param disable_interaction: disable the command's further interaction :return: the command is successfully called """ cmd = CommandManager()._find_command(name) if not cmd: return False SessionImplement = cmd.session_impl or CommandSession session = SessionImplement(bot, event, cmd, current_arg=current_arg, args=args) return await _real_run_command(session, context_id(session.event), check_perm=check_perm, disable_interaction=disable_interaction) async def _real_run_command(session: CommandSession, ctx_id: str, disable_interaction: bool = False, **kwargs) -> Optional[bool]: if not disable_interaction: # override session only when interaction is not disabled _sessions[ctx_id] = session try: logger.debug(f'Running command {session.cmd.name}') session.running = True future = asyncio.ensure_future(session.cmd.run(session, **kwargs)) timeout = None if session.bot.config.SESSION_RUN_TIMEOUT: timeout = session.bot.config.SESSION_RUN_TIMEOUT.total_seconds() try: await asyncio.wait_for(future, timeout) handled = future.result() except asyncio.TimeoutError: handled = True except (_PauseException, _FinishException, SwitchException) as e: raise e except Exception as e: logger.error(f'An exception occurred while ' f'running command {session.cmd.name}:') logger.exception(e) handled = True raise _FinishException(handled) except _PauseException: session.running = False if disable_interaction: # if the command needs further interaction, we view it as failed return False logger.debug(f'Further interaction needed for ' f'command {session.cmd.name}') # return True because this step of the session is successful return True except (_FinishException, SwitchException) as e: session.running = False logger.debug(f'Session of command {session.cmd.name} finished') if not disable_interaction and ctx_id in _sessions: # the command is finished, remove the session, # but if interaction is disabled during this command call, # we leave the _sessions untouched. del _sessions[ctx_id] if isinstance(e, _FinishException): return e.result elif isinstance(e, SwitchException): # we are guaranteed that the session is not first run here, # which means interaction is definitely enabled, # so we can safely touch _sessions here. if ctx_id in _sessions: # make sure there is no session waiting del _sessions[ctx_id] logger.debug(f'Session of command {session.cmd.name} switching, ' f'new message: {e.new_message}') raise e # this is intended to be propagated to handle_message() def kill_current_session(event: CQEvent) -> None: """ Force kill current session of the given event context, despite whether it is running or not. :param event: message event """ ctx_id = context_id(event) if ctx_id in _sessions: del _sessions[ctx_id] from nonebot.command.group import CommandGroup
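# --- Editorial sketch (not part of the module above) ---
# The longest-match rule used by CommandManager.parse_command() when scanning
# bot.config.COMMAND_START: every configured start (plain string or compiled
# regex) is tried, and the longest candidate anchored at position 0 wins.
# re.Pattern (Python 3.8+) replaces the type(re.compile('')) idiom used above.
import re

def match_command_start(cmd_string, command_start):
    matched = None
    for start in command_start:
        current = None
        if isinstance(start, re.Pattern):
            m = start.search(cmd_string)
            if m and m.start(0) == 0:
                current = m.group(0)
        elif isinstance(start, str):
            if cmd_string.startswith(start):
                current = start
        if current is not None and (matched is None or len(current) > len(matched)):
            matched = current
    return matched

print(match_command_start("!!echo hi", ["!", "!!", re.compile(r"[/!]+")]))  # !!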
import re import shlex import asyncio import warnings from datetime import datetime from functools import partial, update_wrapper from typing import (Tuple, Union, Callable, Iterable, Any, Optional, List, Dict, Awaitable, Pattern, Type) from aiocqhttp import Event as CQEvent from aiocqhttp.message import Message from nonebot import NoneBot, permission as perm from nonebot.command.argfilter import ValidateError from nonebot.helpers import context_id, send, render_expression from nonebot.log import logger from nonebot.session import BaseSession from nonebot.typing import (CommandName_T, CommandArgs_T, CommandHandler_T, Message_T, State_T, Filter_T, Patterns_T) # key: context id # value: CommandSession object _sessions = {} # type: Dict[str, "CommandSession"] class Command: __slots__ = ('name', 'func', 'permission', 'only_to_me', 'privileged', 'args_parser_func', 'session_impl') def __init__(self, *, name: CommandName_T, func: CommandHandler_T, permission: int, only_to_me: bool, privileged: bool, session_implement: Optional[Type['CommandSession']]): self.name = name self.func = func self.permission = permission self.only_to_me = only_to_me self.privileged = privileged self.args_parser_func: Optional[CommandHandler_T] = None self.session_impl = session_implement async def run(self, session: 'CommandSession', *, check_perm: bool = True, dry: bool = False) -> bool: """ Run the command in a given session. :param session: CommandSession object :param check_perm: should check permission before running :param dry: just check any prerequisite, without actually running :return: the command is finished (or can be run, given dry == True) """ has_perm = await self._check_perm(session) if check_perm else True if self.func and has_perm: if dry: return True if session.current_arg_filters is not None and \ session.current_key is not None: # argument-level filters are given, use them arg = session.current_arg config = session.bot.config for f in session.current_arg_filters: try: res = f(arg) if isinstance(res, Awaitable): res = await res arg = res except ValidateError as e: # validation failed if config.MAX_VALIDATION_FAILURES > 0: # should check number of validation failures session.state['__validation_failure_num'] = \ session.state.get( '__validation_failure_num', 0) + 1 if session.state['__validation_failure_num'] >= \ config.MAX_VALIDATION_FAILURES: # noinspection PyProtectedMember session.finish( render_expression( config. TOO_MANY_VALIDATION_FAILURES_EXPRESSION ), **session._current_send_kwargs) failure_message = e.message if failure_message is None: failure_message = render_expression( config.DEFAULT_VALIDATION_FAILURE_EXPRESSION) # noinspection PyProtectedMember session.pause(failure_message, **session._current_send_kwargs) # passed all filters session.state[session.current_key] = arg else: # fallback to command-level args_parser_func if self.args_parser_func: await self.args_parser_func(session) if session.current_key is not None and \ session.current_key not in session.state: # args_parser_func didn't set state, here we set it session.state[session.current_key] = session.current_arg await self.func(session) return True return False async def _check_perm(self, session) -> bool: """ Check if the session has sufficient permission to call the command. 
:param session: CommandSession object :return: the session has the permission """ return await perm.check_permission(session.bot, session.event, self.permission) def args_parser(self, parser_func: CommandHandler_T) -> CommandHandler_T: """ Decorator to register a function as the arguments parser of the corresponding command. """ self.args_parser_func = parser_func return parser_func def __repr__(self): return f'<Command, name={self.name.__repr__()}>' def __str__(self): return self.__repr__() class CommandManager: """Global Command Manager""" _commands = {} # type: Dict[CommandName_T, Command] _aliases = {} # type: Dict[str, Command] _switches = {} # type: Dict[Command, bool] _patterns = {} # type: Dict[Pattern, Command] def __init__(self): self.commands = CommandManager._commands.copy() self.aliases = CommandManager._aliases.copy() self.switches = CommandManager._switches.copy() self.patterns = CommandManager._patterns.copy() @classmethod def add_command(cls, cmd_name: CommandName_T, cmd: Command) -> None: """Register a command Args: cmd_name (CommandName_T): Command name cmd (Command): Command object """ if cmd_name in cls._commands: warnings.warn(f"Command {cmd_name} already exists") return cls._switches[cmd] = True cls._commands[cmd_name] = cmd @classmethod def reload_command(cls, cmd_name: CommandName_T, cmd: Command) -> None: """Reload a command **Warning! Dangerous function** Args: cmd_name (CommandName_T): Command name cmd (Command): Command object """ if cmd_name not in cls._commands: warnings.warn( f"Command {cmd_name} does not exist. Please use add_command instead" ) return cmd_ = cls._commands[cmd_name] if cmd_ in cls._switches: del cls._switches[cmd_] cls._switches[cmd] = True cls._commands[cmd_name] = cmd @classmethod def remove_command(cls, cmd_name: CommandName_T) -> bool: """Remove a command **Warning! Dangerous function** Args: cmd_name (CommandName_T): Command name to remove Returns: bool: Success or not """ if cmd_name in cls._commands: cmd = cls._commands[cmd_name] for alias in list( filter(lambda x: cls._aliases[x] == cmd, cls._aliases.keys())): del cls._aliases[alias] del cls._commands[cmd_name] if cmd in cls._switches: del cls._switches[cmd] return True return False @classmethod def switch_command_global(cls, cmd_name: CommandName_T, state: Optional[bool] = None): """Change command state globally or simply switch it if `state` is None Args: cmd_name (CommandName_T): Command name state (Optional[bool]): State to change to. Defaults to None. """ cmd = cls._commands[cmd_name] cls._switches[cmd] = not cls._switches[cmd] if state is None else bool( state) @classmethod def add_aliases(cls, aliases: Union[Iterable[str], str], cmd: Command): """Register command alias(es) Args: aliases (Union[Iterable[str], str]): Command aliases cmd_name (Command): Command """ if isinstance(aliases, str): aliases = (aliases,) for alias in aliases: if not isinstance(alias, str): warnings.warn(f"Alias {alias} is not a string! 
Ignored") return elif alias in cls._aliases: warnings.warn(f"Alias {alias} already exists") return cls._aliases[alias] = cmd @classmethod def add_patterns(cls, patterns: Patterns_T, cmd: Command): """Register command alias(es) Args: patterns (Union[Iterable[Pattern], Pattern, Iterable[str], str]): Command patterns cmd (Command): Matched command """ if isinstance(patterns, (str, Pattern)): patterns = (patterns,) for pattern in patterns: if isinstance(pattern, str): pattern = re.compile(pattern) if not isinstance(pattern, Pattern): warnings.warn( f"Pattern {pattern} is not a regex or string! Ignored") continue elif pattern in cls._patterns: warnings.warn(f"Pattern {pattern} already exists") continue cls._patterns[pattern] = cmd def _add_command_to_tree(self, cmd_name: CommandName_T, cmd: Command, tree: Dict[str, Union[Dict, Command]]) -> None: """Add command to the target command tree. Args: cmd_name (CommandName_T): Name of the command cmd (Command): Command object tree (Dict[str, Union[Dict, Command]): Target command tree """ current_parent = tree for parent_key in cmd_name[:-1]: current_parent[parent_key] = current_parent.get(parent_key) or {} current_parent = current_parent[parent_key] if not isinstance(current_parent, dict): warnings.warn(f"{current_parent} is not a registry dict") return if cmd_name[-1] in current_parent: warnings.warn(f"There is already a command named {cmd_name}") return current_parent[cmd_name[-1]] = cmd def _generate_command_tree( self, commands: Dict[CommandName_T, Command]) -> Dict[str, Union[Dict, Command]]: """Generate command tree from commands dictionary. Args: commands (Dict[CommandName_T, Command]): Dictionary of commands Returns: Dict[str, Union[Dict, "Command"]]: Command tree """ cmd_tree = {} #type: Dict[str, Union[Dict, "Command"]] for cmd_name, cmd in commands.items(): self._add_command_to_tree(cmd_name, cmd, cmd_tree) return cmd_tree def _find_command(self, name: Union[str, CommandName_T]) -> Optional[Command]: cmd_name = (name,) if isinstance(name, str) else name if not cmd_name: return None # cmd_tree = self._generate_command_tree({ # name: cmd # for name, cmd in self.commands.items() # if self.switches.get(cmd, True) # }) # for part in cmd_name[:-1]: # if part not in cmd_tree or not isinstance( # cmd_tree[part], #type: ignore # dict): # return None # cmd_tree = cmd_tree[part] # type: ignore # cmd = cmd_tree.get(cmd_name[-1]) # type: ignore # return cmd if isinstance(cmd, Command) else None cmd = { name: cmd for name, cmd in self.commands.items() if self.switches.get(cmd, True) }.get(cmd_name) return cmd def parse_command( self, bot: NoneBot, event: CQEvent) -> Tuple[Optional[Command], Optional[str]]: cmd_string = str(event.message).lstrip() logger.debug(f'Parsing command: {repr(cmd_string)}') matched_start = None for start in bot.config.COMMAND_START: # loop through COMMAND_START to find the longest matched start curr_matched_start = None if isinstance(start, type(re.compile(''))): m = start.search(cmd_string) if m and m.start(0) == 0: curr_matched_start = m.group(0) elif isinstance(start, str): if cmd_string.startswith(start): curr_matched_start = start if curr_matched_start is not None and \ (matched_start is None or len(curr_matched_start) > len(matched_start)): # a longer start, use it matched_start = curr_matched_start if matched_start is None: # it's not a command logger.debug('It\'s not a command') return None, None logger.debug(f'Matched command start: ' f'{matched_start}{"(empty)" if not matched_start else ""}') full_command = 
cmd_string[len(matched_start):].lstrip() if not full_command: # command is empty return None, None cmd_name_text, *cmd_remained = full_command.split(maxsplit=1) cmd_name = None for sep in bot.config.COMMAND_SEP: # loop through COMMAND_SEP to find the most optimized split curr_cmd_name = None if isinstance(sep, type(re.compile(''))): curr_cmd_name = tuple(sep.split(cmd_name_text)) elif isinstance(sep, str): curr_cmd_name = tuple(cmd_name_text.split(sep)) if curr_cmd_name is not None and \ (not cmd_name or len(curr_cmd_name) > len(cmd_name)): # a more optimized split, use it cmd_name = curr_cmd_name if not cmd_name: cmd_name = (cmd_name_text,) logger.debug(f'Split command name: {cmd_name}') cmd = self._find_command(cmd_name) # type: ignore if not cmd: logger.debug(f'Command {cmd_name} not found. Try to match aliases') cmd = { name: cmd for name, cmd in self.aliases.items() if self.switches.get(cmd, True) }.get(cmd_name_text) if not cmd: logger.debug(f'Alias {cmd_name} not found. Try to match patterns') patterns = { pattern: cmd for pattern, cmd in self.patterns.items() if self.switches.get(cmd, True) } for pattern in patterns: if pattern.search(full_command): cmd = patterns[pattern] logger.debug( f'Pattern {pattern} of command {cmd.name} matched, function: {cmd.func}' ) # if command matched by regex, it will use the full_command as the current_arg of the session return cmd, full_command if not cmd: return None, None logger.debug(f'Command {cmd.name} found, function: {cmd.func}') return cmd, ''.join(cmd_remained) def switch_command(self, cmd_name: CommandName_T, state: Optional[bool] = None): """Change command state or simply switch it if `state` is None Args: cmd_name (CommandName_T): Command name state (Optional[bool]): State to change to. Defaults to None. """ cmd = self.commands[cmd_name] self.switches[cmd] = not self.switches[cmd] if state is None else bool( state) class CommandInterrupt(Exception): pass class _PauseException(CommandInterrupt): """ Raised by session.pause() indicating that the command session should be paused to ask the user for some arguments. """ pass class _FinishException(CommandInterrupt): """ Raised by session.finish() indicating that the command session should be stopped and removed. """ def __init__(self, result: bool = True): """ :param result: succeeded to call the command """ self.result = result class SwitchException(CommandInterrupt): """ Raised by session.switch() indicating that the command session should be stopped and replaced with a new one (going through handle_message() again). Since the new message will go through handle_message() again, the later function should be notified. So this exception is intended to be propagated to handle_message(). 
""" def __init__(self, new_message: Message): """ :param new_message: new message which should be placed in event """ self.new_message = new_message class CommandSession(BaseSession): __slots__ = ('cmd', 'current_key', 'current_arg_filters', '_current_send_kwargs', 'current_arg', '_current_arg_text', '_current_arg_images', '_state', '_last_interaction', '_running', '_run_future') def __init__(self, bot: NoneBot, event: CQEvent, cmd: Command, *, current_arg: Optional[str] = '', args: Optional[CommandArgs_T] = None): super().__init__(bot, event) self.cmd = cmd # Command object # unique key of the argument that is currently requesting (asking) self.current_key: Optional[str] = None # initialize current argument filters self.current_arg_filters: Optional[List[Filter_T]] = None self._current_send_kwargs: Dict[str, Any] = {} # initialize current argument self.current_arg: Optional[str] = '' # with potential CQ codes self._current_arg_text = None self._current_arg_images = None self.refresh(event, current_arg=current_arg) # fill the above self._run_future = partial(asyncio.run_coroutine_threadsafe, loop=bot.loop) self._state: State_T = {} if args: self._state.update(args) self._last_interaction = None # last interaction time of this session self._running = False @property def state(self) -> State_T: """ State of the session. This contains all named arguments and other session scope temporary values. """ return self._state @property def args(self) -> CommandArgs_T: """Deprecated. Use `session.state` instead.""" return self.state @property def running(self) -> bool: return self._running @running.setter def running(self, value) -> None: if self._running is True and value is False: # change status from running to not running, record the time self._last_interaction = datetime.now() self._running = value @property def is_valid(self) -> bool: """Check if the session is expired or not.""" if self.bot.config.SESSION_EXPIRE_TIMEOUT and \ self._last_interaction and \ datetime.now() - self._last_interaction > \ self.bot.config.SESSION_EXPIRE_TIMEOUT: return False return True @property def is_first_run(self) -> bool: return self._last_interaction is None @property def current_arg_text(self) -> str: """ Plain text part in the current argument, without any CQ codes. """ if self._current_arg_text is None: self._current_arg_text = Message( self.current_arg).extract_plain_text() return self._current_arg_text @property def current_arg_images(self) -> List[str]: """ Images (as list of urls) in the current argument. """ if self._current_arg_images is None: self._current_arg_images = [ s.data['url'] for s in Message(self.current_arg) if s.type == 'image' and 'url' in s.data ] return self._current_arg_images @property def argv(self) -> List[str]: """ Shell-like argument list, similar to sys.argv. Only available while shell_like is True in on_command decorator. """ return self.state.get('argv', []) def refresh(self, event: CQEvent, *, current_arg: Optional[str] = '') -> None: """ Refill the session with a new message event. :param event: new message event :param current_arg: new command argument as a string """ self.event = event self.current_arg = current_arg self._current_arg_text = None self._current_arg_images = None def get(self, key: str, *, prompt: Optional[Message_T] = None, arg_filters: Optional[List[Filter_T]] = None, **kwargs) -> Any: """ Get an argument with a given key. 
If the argument does not exist in the current session, a pause exception will be raised, and the caller of the command will know it should keep the session for further interaction with the user. :param key: argument key :param prompt: prompt to ask the user :param arg_filters: argument filters for the next user input :return: the argument value """ if key in self.state: return self.state[key] self.current_key = key self.current_arg_filters = arg_filters self._current_send_kwargs = kwargs self.pause(prompt, **kwargs) def get_optional(self, key: str, default: Optional[Any] = None) -> Optional[Any]: """ Simply get a argument with given key. Deprecated. Use `session.state.get()` instead. """ return self.state.get(key, default) def pause(self, message: Optional[Message_T] = None, **kwargs) -> None: """Pause the session for further interaction.""" if message: self._run_future(self.send(message, **kwargs)) raise _PauseException def finish(self, message: Optional[Message_T] = None, **kwargs) -> None: """Finish the session.""" if message: self._run_future(self.send(message, **kwargs)) raise _FinishException def switch(self, new_message: Message_T) -> None: """ Finish the session and switch to a new (fake) message event. The user may send another command (or another intention as natural language) when interacting with the current session. In this case, the session may not understand what the user is saying, so it should call this method and pass in that message, then NoneBot will handle the situation properly. """ if self.is_first_run: # if calling this method during first run, # we think the command is not handled raise _FinishException(result=False) if not isinstance(new_message, Message): new_message = Message(new_message) raise SwitchException(new_message) async def handle_command(bot: NoneBot, event: CQEvent, manager: CommandManager) -> Optional[bool]: """ Handle a message as a command. This function is typically called by "handle_message". 
:param bot: NoneBot instance :param event: message event :param manager: command manager :return: the message is handled as a command """ cmd, current_arg = manager.parse_command(bot, event) is_privileged_cmd = cmd and cmd.privileged if is_privileged_cmd and cmd.only_to_me and not event['to_me']: is_privileged_cmd = False disable_interaction = bool(is_privileged_cmd) if is_privileged_cmd: logger.debug(f'Command {cmd.name} is a privileged command') ctx_id = context_id(event) if not is_privileged_cmd: # wait for 1.5 seconds (at most) if the current session is running retry = 5 while retry > 0 and \ _sessions.get(ctx_id) and _sessions[ctx_id].running: retry -= 1 await asyncio.sleep(0.3) check_perm = True session: Optional[CommandSession] = _sessions.get( ctx_id) if not is_privileged_cmd else None if session is not None: if session.running: logger.warning(f'There is a session of command ' f'{session.cmd.name} running, notify the user') asyncio.ensure_future( send(bot, event, render_expression(bot.config.SESSION_RUNNING_EXPRESSION))) # pretend we are successful, so that NLP won't handle it return True if session.is_valid: logger.debug(f'Session of command {session.cmd.name} exists') # since it's in a session, the user must be talking to me event['to_me'] = True session.refresh(event, current_arg=str(event['message'])) # there is no need to check permission for existing session check_perm = False else: # the session is expired, remove it logger.debug(f'Session of command {session.cmd.name} is expired') if ctx_id in _sessions: del _sessions[ctx_id] session = None if session is None: if not cmd: logger.debug('Not a known command, ignored') return False if cmd.only_to_me and not event['to_me']: logger.debug('Not to me, ignored') return False SessionImpl = cmd.session_impl or CommandSession session = SessionImpl(bot, event, cmd, current_arg=current_arg) logger.debug(f'New session of command {session.cmd.name} created') assert isinstance(session, CommandSession) return await _real_run_command(session, ctx_id, check_perm=check_perm, disable_interaction=disable_interaction) async def call_command(bot: NoneBot, event: CQEvent, name: Union[str, CommandName_T], *, current_arg: str = '', args: Optional[CommandArgs_T] = None, check_perm: bool = True, disable_interaction: bool = False) -> Optional[bool]: """ Call a command internally. This function is typically called by some other commands or "handle_natural_language" when handling NLPResult object. Note: If disable_interaction is not True, after calling this function, any previous command session will be overridden, even if the command being called here does not need further interaction (a.k.a asking the user for more info). 
:param bot: NoneBot instance :param event: message event :param name: command name :param current_arg: command current argument string :param args: command args :param check_perm: should check permission before running command :param disable_interaction: disable the command's further interaction :return: the command is successfully called """ cmd = CommandManager()._find_command(name) if not cmd: return False SessionImplement = cmd.session_impl or CommandSession session = SessionImplement(bot, event, cmd, current_arg=current_arg, args=args) return await _real_run_command(session, context_id(session.event), check_perm=check_perm, disable_interaction=disable_interaction) async def _real_run_command(session: CommandSession, ctx_id: str, disable_interaction: bool = False, **kwargs) -> Optional[bool]: if not disable_interaction: # override session only when interaction is not disabled _sessions[ctx_id] = session try: logger.debug(f'Running command {session.cmd.name}') session.running = True future = asyncio.ensure_future(session.cmd.run(session, **kwargs)) timeout = None if session.bot.config.SESSION_RUN_TIMEOUT: timeout = session.bot.config.SESSION_RUN_TIMEOUT.total_seconds() try: await asyncio.wait_for(future, timeout) handled = future.result() except asyncio.TimeoutError: handled = True except (_PauseException, _FinishException, SwitchException) as e: raise e except Exception as e: logger.error(f'An exception occurred while ' f'running command {session.cmd.name}:') logger.exception(e) handled = True raise _FinishException(handled) except _PauseException: session.running = False if disable_interaction: # if the command needs further interaction, we view it as failed return False logger.debug(f'Further interaction needed for ' f'command {session.cmd.name}') # return True because this step of the session is successful return True except (_FinishException, SwitchException) as e: session.running = False logger.debug(f'Session of command {session.cmd.name} finished') if not disable_interaction and ctx_id in _sessions: # the command is finished, remove the session, # but if interaction is disabled during this command call, # we leave the _sessions untouched. del _sessions[ctx_id] if isinstance(e, _FinishException): return e.result elif isinstance(e, SwitchException): # we are guaranteed that the session is not first run here, # which means interaction is definitely enabled, # so we can safely touch _sessions here. if ctx_id in _sessions: # make sure there is no session waiting del _sessions[ctx_id] logger.debug(f'Session of command {session.cmd.name} switching, ' f'new message: {e.new_message}') raise e # this is intended to be propagated to handle_message() def kill_current_session(event: CQEvent) -> None: """ Force kill current session of the given event context, despite whether it is running or not. :param event: message event """ ctx_id = context_id(event) if ctx_id in _sessions: del _sessions[ctx_id] from nonebot.command.group import CommandGroup
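
# --- Hedged usage sketch (illustration only, not part of the module above) ---
# A minimal example of the CommandSession interaction model documented in the
# docstrings above, assuming the standard nonebot v1 public API
# (`nonebot.on_command`). The command name 'weather' and the argument key
# 'city' are hypothetical.
from nonebot import on_command, CommandSession

@on_command('weather', only_to_me=False)
async def weather(session: CommandSession):
    # get() returns at once if 'city' is already in session.state;
    # otherwise it records the key, sends the prompt, and raises the
    # internal pause exception so the session waits for the user's
    # next message before this handler is run again.
    city = session.get('city', prompt='Which city do you want?')
    await session.send(f'Weather for {city}: ...')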
import cv2 import numpy as np import png import os import pydicom def get_png_filename(row): """ 'D1-0000_L_A-R_mass_malignant|triple negative.png' """ fname = get_filename(row) fname += ".png" # if not row['PatientName'] in PIDS_TO_CHECK else '-wrong.png' return fname def get_filename(row): fname = row['PatientName'] fname += f"_{row['ImageLaterality']}" fname += f"_{'-'.join(eval(row['PatientOrientation']))}" fname += f"_{row['abnormality']}" fname += f"_{row['classification']}{'|'+row['subtype'] if row['subtype'] else ''}" return fname def erode(image, ksize=(10,10)): return cv2.erode(image, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, ksize=ksize)) def tophat(image, ksize=(10,100)): return cv2.morphologyEx(image, cv2.MORPH_TOPHAT, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, ksize=ksize)) def otsu(image): threshold, mask = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) return mask # def quantile_threshold(image, quantile): # lb = np.quantile(image, quantile) # threshold, mask = cv2.threshold(image, lb, 255, cv2.THRESH_BINARY) # return mask, threshold # def fixed_threshold(image, t): # lb = t # threshold, mask = cv2.threshold(image, lb, 255, cv2.THRESH_BINARY) # return mask, threshold def get_threshold_mask(image, t): if isinstance(t, str): if t.lower() != 'otsu': print('Unknown method named ', t) return None, None threshold, mask = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) return mask, threshold lb = np.quantile(image, t) if t < 1 else t threshold, mask = cv2.threshold(image, lb, 255, cv2.THRESH_BINARY) return mask, threshold def dilate(mask, ksize=(10,10)): return cv2.dilate(mask, kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, ksize=ksize)) def bbox(mask): cnts, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) #print(str(len(cnts))+' contours detected') # Find maximum area contour area = np.array([cv2.contourArea(cnts[i]) for i in range(len(cnts))]) if np.sum(area)>1: maxa_ind = np.argmax(area) # index of maximum area contour xx = [cnts[maxa_ind][i][0][0] for i in range(len(cnts[maxa_ind]))] yy = [cnts[maxa_ind][i][0][1] for i in range(len(cnts[maxa_ind]))] return [min(xx),max(xx),min(yy),max(yy)] return None def identify_bbox(image, erosion_ksize=(150,150), tophat_ksize=(160, 20), dilatation_ksize=(130,130)): """ Automatically identify potential lesions as in https://downloads.hindawi.com/journals/cmmm/2019/2717454.pdf Sec 3.2.1. ROIC Extraction Image is an openCV array of gray colors or an image filepath """ if isinstance(image, str): image = cv2.imread(image) image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) image = erode(image, erosion_ksize) image = tophat(image, tophat_ksize) image = otsu(image) image = dilate(image, dilatation_ksize) roi = bbox(image) return roi def identify_bbox_v2(image, threshold=.999, erosion_ksize=(150,150), tophat_ksize=(160, 20), dilatation_ksize=(130,130)): """ Automatically identify potential lesions as in https://downloads.hindawi.com/journals/cmmm/2019/2717454.pdf Sec 3.2.1. 
ROIC Extraction Image is an openCV array of gray colors or an image filepath """ if isinstance(image, str): image = cv2.imread(image) image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) eroded_img = erode(image, erosion_ksize) erosion_mask = eroded_img > 10 image_erosion = image.copy() image_erosion *= erosion_mask image = tophat(image_erosion, tophat_ksize) mask, th = get_threshold_mask(image, threshold) mask = dilate(mask, dilatation_ksize) roi = bbox(mask) return roi def dicom_to_png(file, dest_fp, overwrite=False, greyscale=True): if os.path.exists(dest_fp) and not overwrite: return ds = pydicom.dcmread(file) image_2d = ds.pixel_array#.astype(float) shape = image_2d.shape if shape != (2294, 1914): # logger.warning(f'Shape for image {file} is {shape} instead of (2294, 1914)') print(f'Shape for image {file} is {shape} instead of (2294, 1914)') # Write the PNG file with open(dest_fp, 'wb') as png_file: w = png.Writer(shape[1], shape[0], greyscale=greyscale) w.write(png_file, image_2d)#_scaled) return
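
# --- Hedged usage sketch for the ROI pipeline above (illustration only) ---
# Shows how identify_bbox_v2() is meant to be called; the input path
# 'mammogram.png' is hypothetical.
if __name__ == '__main__':
    roi = identify_bbox_v2('mammogram.png', threshold=0.999)
    if roi is None:
        print('No candidate lesion region found')
    else:
        xmin, xmax, ymin, ymax = roi
        print(f'Candidate ROI: x=[{xmin}, {xmax}], y=[{ymin}, {ymax}]')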
#!/usr/bin/env python3 import os import re import sys script_dir = os.path.dirname(__file__) directory_to_check = os.path.join(script_dir, "../book/content/") def remove_comments(text_string): """Function to omit html comment identifiers in a text string using regular expression matches Arguments: text_string {string} -- The text to be matched Returns: {string} -- The input text string with html comments removed """ p = re.sub("(?s)<!--(.*?)-->", "", text_string) return p def get_lines(text_string, sub_string): """Get individual lines in a text file Arguments: text_string {string} -- The text string to test sub_string {string} -- The conditional string to perform splitting on Returns: {list} -- A list of split strings """ lines = [line for line in text_string.split("\n") if sub_string in line] return lines def get_files(directory): """Get a list of files to be checked. Ignores image files. Arguments: directory {string} -- The directory containing the files to check Returns: {list} -- List of files to check """ files = [] filetypes_to_ignore = (".png", ".jpg") for rootdir, _, filenames in os.walk(directory): for filename in filenames: if not filename.endswith(filetypes_to_ignore): files.append(os.path.join(rootdir, filename)) return files def read_and_check_files(files): """Function to read in files, remove html comments and check for bad latin phrases Arguments: files {list} -- List of filenames to be checked Returns: {dict} -- Dictionary: Top level keys are absolute filepaths to files that failed the check. Each of these has two keys: 'latin_type' containing the unwanted latin phrase, and 'line' containing the offending line. """ failing_files = {} bad_latin = ["i.e.", "e.g.", "e.t.c.", " etc", " ie"] for filename in files: with open(filename, encoding="utf8", errors="ignore") as f: text = f.read() text = remove_comments(text) for latin_type in bad_latin: if latin_type in text.lower(): lines = get_lines(text.lower(), latin_type) for line in lines: failing_files[os.path.abspath(filename)] = { "latin_type": latin_type, "line": line, } return failing_files def construct_error_message(files_dict): """Function to construct an error message pointing out where bad latin phrases appear in lines of text Arguments: files_dict {dictionary} -- Dictionary of failing files containing the bad latin phrases and offending lines Returns: {string} -- The error message to be raised """ error_message = ["Bad latin found in the following files:\n"] for file in files_dict.keys(): error_message.append( f"{file}:\t{files_dict[file]['latin_type']}\tfound in line\t[{files_dict[file]['line']}]\n" ) return "\n".join(error_message) def main(): files = get_files(directory_to_check) failing_files = read_and_check_files(files) if bool(failing_files): error_message = construct_error_message(failing_files) raise Exception(error_message) if __name__ == "__main__": main()
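
# --- Hedged illustration of the two helpers above (inputs are made up) ---
# >>> remove_comments("keep <!-- drop --> keep")
# 'keep  keep'
# >>> get_lines("ok line\nuses i.e. here\nanother", "i.e.")
# ['uses i.e. here']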
#!/usr/bin/env python3
import requests
from urllib.parse import urlsplit, parse_qs
import base64
import time
import json
from datetime import datetime


class MYGES:
    def __init__(self, discordid, username=None, password=None):
        self.actionurl = "https://api.kordis.fr"
        self.discordid = discordid
        if username is not None and password is not None:
            self.username = username
            self.password = password
            self.register()
        if self.is_registered():
            self.userobj = self.get_userbydiscordid()
            self.token = self.get_token(self.userobj["BasicToken"])  # Fetch the bearer token

    def is_registered(self):
        with open('users.json', 'r') as users:
            jsonusers = json.load(users)
        for user in jsonusers:
            if user["DiscordId"] == self.discordid:
                return True
        return False

    def register(self):
        with open('users.json', 'r') as users:
            jsonusers = json.load(users)
        if self.is_registered():
            return
        jsonusers.append({
            "DiscordId": self.discordid,
            "BasicToken": self.get_basictoken()
        })
        with open('users.json', 'w') as usersout:
            json.dump(jsonusers, usersout, indent=4, separators=(',', ': '))

    def get_userbydiscordid(self):
        with open('users.json', 'r') as users:
            jsonusers = json.load(users)
        for user in jsonusers:
            if user["DiscordId"] == self.discordid:
                return user

    def get_basictoken(self):
        token = base64.b64encode(f"{self.username}:{self.password}".encode()).decode('UTF-8')
        return {"Authorization": f"Basic {token}"}

    def get_token(self, token):
        urltoken = 'https://authentication.kordis.fr/oauth/authorize?response_type=token&client_id=skolae-app'
        try:
            r = requests.get(urltoken, headers=token)
        except requests.exceptions.InvalidSchema as e:
            # The OAuth implicit flow redirects to a non-HTTP scheme, so
            # requests raises InvalidSchema; the access token is recovered
            # from the redirect URL's fragment (the '#' is swapped for '?'
            # so the fragment can be parsed as a query string).
            urltokenreturn = repr(e).split("'")[1].replace("#", '?')
            tokenret = {k: v[0] for k, v in parse_qs(urlsplit(urltokenreturn).query).items()}
            return {"Authorization": f"{tokenret['token_type']} {tokenret['access_token']}"}

    def get_info(self):
        print(self.token)
        return requests.get(f"{self.actionurl}/me/profile", headers=self.token).json()["result"]

    def get_agenda(self, start, end):
        return requests.get(f"{self.actionurl}/me/agenda?start={start}&end={end}", headers=self.token).json()["result"]

    def get_absences(self, year):
        return requests.get(f"{self.actionurl}/me/{year}/absences", headers=self.token).json()["result"]

    def get_courses(self, year):
        return requests.get(f"{self.actionurl}/me/{year}/courses", headers=self.token).json()["result"]

    def get_grades(self, year):
        return requests.get(f"{self.actionurl}/me/{year}/grades", headers=self.token).json()["result"]

    def get_partners(self):
        return requests.get(f"{self.actionurl}/me/partners", headers=self.token).json()["result"]

    def get_school(self):
        return requests.get(f"{self.actionurl}/me/schools", headers=self.token).json()["result"][0]

    def get_students(self, year):
        return requests.get(requests.get(f"{self.actionurl}/me/{year}/classes", headers=self.token).json()["result"][0]["links"][1]["href"], headers=self.token).json()["result"]

    def get_news(self):
        return requests.get(f"{self.actionurl}/me/news", headers=self.token).json()["result"]

    # All methods below display API information
    def print_info(self):
        data = self.get_info()
        for key in data:
            if key == 'mailing':
                break
            print(f"{key} : {data[key]}")

    def print_moyenne(self, year="2021"):
        """Print the user's overall grade average."""
        jsondata = self.get_grades(year)
        somme_notes = 0
        nombre_de_notes = 0
        for row in jsondata:  # get_grades() already unwraps ["result"]
            moyenne_matiere = 0
            if row['grades']:
                for i in row['grades']:
                    moyenne_matiere += i
                moyenne_matiere = moyenne_matiere / len(row['grades'])
            try:
                somme_notes += moyenne_matiere * int(float(row['coef']))
                nombre_de_notes += int(float(row['coef']))
            except Exception:
                continue
        moyenne = somme_notes / nombre_de_notes
        print(f"Vous avez {round(moyenne, 2)}/20 de moyenne générale.")

    def print_agenda(self, start, end):
        data = self.get_agenda(start, end)
        for row in data:
            ctr = ''
            debut_du_cours = datetime.fromtimestamp(row["start_date"] / 1000)
            fin_du_cours = datetime.fromtimestamp(row["end_date"] / 1000)
            prof = row["teacher"]
            matiere = row["name"]
            type_de_cours = row["modality"]
            room_info = row['rooms']
            if room_info is None:
                room_number = None
                etage = None
                campus = None
            else:
                for key in room_info:
                    room_number = key['name']
                    etage = key['floor']
                    campus = key['campus']
            print(f"Le prochain cours aura lieu au campus {campus}, à l'étage {etage} salle numéro {room_number}")
            print(f"Il commencera à {debut_du_cours} et finira à {fin_du_cours}, il sera dirigé par {prof} qui vous enseignera {matiere}")

    def print_absences(self, start, end):
        for row in self.get_absences(start, end):
            date = time.strftime('%d-%m-%Y %H:%M', time.localtime(int(str(row['date'])[:-3])))
            just = "Justifiée" if row["justified"] else "Non justifiée"
            print(f'{date}\t{just}\t{row["course_name"]}')

    def print_courses(self, year):
        data = self.get_courses(year)
        for classes in data:
            ctr = ''
            for key in classes:
                print(f"{ctr}{key} : {classes[key]}")
                ctr = '\t'
            print()

    def print_grades(self, year):
        data = self.get_grades(year)
        for classes in data:
            ctr = ''
            for key in classes:
                print(f"{ctr}{key} : {classes[key]}")
                ctr = '\t'
            print()

    def print_partners(self):
        data = self.get_partners()
        for row in data:
            ctr = ''
            for key in row:
                print(f"{ctr}{key} : {row[key]}")
                ctr = '\t'
            print()

    def print_school(self):
        data = self.get_school()
        for row in data:
            print(f"{row}: {data[row]}")

    def print_students(self, year):
        data = self.get_students(year)
        for classes in data:
            ctr = ''
            for key in classes:
                print(f"{ctr}{key} : {classes[key]}")
                ctr = '\t'
            print()

    def print_news(self):
        data = self.get_news()
        link_news = None
        for info in data['content']:
            title_news = info['title']
            author_news = info['author']
            date_news = datetime.fromtimestamp(info["date"] / 1000)
            date_news = date_news.strftime("%d/%m/%Y")
            for i in info['links']:
                if "https://www.myges.fr/#/actualites" in i['href']:
                    link_news = i['href']
            print(f"L'article a pour titre {title_news}, il a été écrit par {author_news} et a été publié le {date_news}. Lien de l'article ici:{link_news}")


def main():
    myges = MYGES("6666", "", "")
    print("""
88888888b .d88888b .88888. dP 888888ba .88888. d888888P
88 88. "' d8' `88 88 88 `8b d8' `8b 88
a88aaaa `Y88888b. 88 88 a88aaaa8P' 88 88 88
88 `8b 88 YP88 88 88 `8b. 88 88 88
88 d8' .8P Y8. .88 88 88 .88 Y8. .8P 88
88888888P Y88888P `88888' dP 88888888P `8888P' dP
ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
""")
    today = int(round(time.time() * 1000))
    year = 31536000000
    myges.print_moyenne()
    #myges.print_info()

if __name__ == '__main__':
    main()
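
# --- Hedged illustration of the redirect-token parsing used in get_token()
# above; the redirect URL below is made up.
# >>> from urllib.parse import urlsplit, parse_qs
# >>> url = 'skolae-app://oauth?token_type=Bearer&access_token=abc123'
# >>> {k: v[0] for k, v in parse_qs(urlsplit(url).query).items()}
# {'token_type': 'Bearer', 'access_token': 'abc123'}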
import hashlib import re from sqlalchemy.exc import IntegrityError from atst.database import db from atst.domain.exceptions import AlreadyExistsError def first_or_none(predicate, lst): return next((x for x in lst if predicate(x)), None) def getattr_path(obj, path, default=None): _obj = obj for item in path.split("."): if isinstance(_obj, dict): _obj = _obj.get(item) else: _obj = getattr(_obj, item, default) return _obj def camel_to_snake(camel_cased): s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel_cased) return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() def snake_to_camel(snake_cased): parts = snake_cased.split("_") return f"{parts[0]}{''.join([w.capitalize() for w in parts[1:]])}" def pick(keys, dct): _keys = set(keys) return {k: v for (k, v) in dct.items() if k in _keys} def commit_or_raise_already_exists_error(message): try: db.session.commit() except IntegrityError: db.session.rollback() raise AlreadyExistsError(message) def sha256_hex(string): hsh = hashlib.sha256(string.encode()) return hsh.digest().hex()
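
# --- Hedged usage examples for the helpers above (inputs are made up) ---
# >>> camel_to_snake("CamelCasedName")
# 'camel_cased_name'
# >>> snake_to_camel("snake_cased_name")
# 'snakeCasedName'
# >>> pick(["a", "b"], {"a": 1, "b": 2, "c": 3})
# {'a': 1, 'b': 2}
# >>> getattr_path({"user": {"name": "dana"}}, "user.name")
# 'dana'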
#!/usr/bin/python3
# -*- coding: utf-8 -*-

__author__ = 'Richard J. Sears'
VERSION = "0.6 (2021-04-22)"

### Simple python script that helps to move my chia plots from my plotter to
### my nas. I wanted to use netcat as it was much faster on my 10GBe link than
### rsync and the servers are secure so I wrote this script to manage that
### move process. It will get better with time as I add in error checking and
### other things like notifications and stuff.

# Updates
#
# V0.6 2021-04-22
# - Check Chia logs and report actual plots being farmed (per Chia) and
#   total amount of drive space in use (also per Chia). It is not
#   uncommon for the total number of plots on your system to be slightly
#   different than what `drive_manager.py` reports due to plot moves, etc
#   but if there is a large difference, you should check your logs for
#   signs of other issues.
#
# V0.5 2021-04-22
# - Updated to support local plot management via `move_local_plots.py`
#
# V0.4 2021-04-13
# - Added ability to "offline" a drive for maintenance. Before, the script would
#   select the very first available drive (based on drive number: drive0, drive1)
#   for plot storage. I ran into a problem when one of my drives kicked out a
#   smartctl error and I needed to move the plots off of it before it failed. As
#   soon as I started to move them, drive_manager.py started to fill the drive
#   back up. So now you can offline a drive and drive_manager will not use it until
#   you online it again. You still need to go into your chia harvester config
#   and remove the drive from there.
#
#
# V0.3 2021-04-04
# - Added multiple command line utilities to drive_manager.py including:
#   * -dr or --drive_report    Immediately runs the Daily Report and sends email (if configured)
#   * -ct or --check_temps     Checks the temperatures of all configured plot drives
#   * -pr or --plot_report     Quick plot report like email report but to the screen
#   * -ud or --update_daily    Designed to be called from cron, updates daily plot stats (speed, etc)
#     Be careful if using it from the command line, it resets your stats. This
#     should be run once per day from a cronjob.
#
# - Added plot time information to the daily email report including:
#   * Total Plots last 24 hours
#   * Average Plots per Hour (last 24 hours)
#   * Average Plotting Speed (last 24 hours)
#
# V0.2 2021-03-23
# - Moved system logging types to plot_manager_config and updated necessary functions.
# - Added per_plot system notification function (send_new_plot_notification()) # - Updated read_config_data() to support ConfigParser boolean returns # - Updated necessary functions for read_config_data() change import os import sys mount_path = "/mnt/usb/" main_path = "/home/pw/chia_plot_manager/chianas/" sys.path.append(main_path) import subprocess import shutil import psutil from pySMART import Device, DeviceList # CAUTION - DO NOT use PyPI version, use https://github.com/truenas/py-SMART from psutil._common import bytes2human import logging from system_logging import setup_logging from system_logging import read_logging_config import system_info from pushbullet import Pushbullet, errors as pb_errors from twilio.rest import Client from twilio.base.exceptions import TwilioRestException import configparser from jinja2 import Environment, PackageLoader, select_autoescape from datetime import datetime from datetime import timedelta import time config = configparser.ConfigParser() import argparse import textwrap from natsort import natsorted import mmap # Define some colors for our help message red='\033[0;31m' yellow='\033[0;33m' green='\033[0;32m' white='\033[0;37m' blue='\033[0;34m' nc='\033[0m' import sentry_sdk #sentry_sdk.init( # "https://xxxxxxxxxxxxxxxxxxxxxxx.ingest.sentry.io/xxxxxxxxx", # # # Set traces_sample_rate to 1.0 to capture 100% # # of transactions for performance monitoring. # # We recommend adjusting this value in production. # traces_sample_rate=1.0 #) from sentry_sdk import capture_exception # Let's do some housekeeping nas_server = 'chianas01' plot_size_k = 108995911228 plot_size_g = 101.3623551 receive_script = main_path + 'receive_plot.sh' chia_log_file = '/home/pw/.chia/mainnet/log/debug.log' # Date and Time Stuff today = datetime.today().strftime('%A').lower() current_military_time = datetime.now().strftime('%H:%M:%S') current_timestamp = int(time.time()) # Setup Module logging. Main logging is configured in system_logging.py setup_logging() level = read_logging_config('plot_manager_config', 'system_logging', 'log_level') level = logging._checkLevel(level) log = logging.getLogger(__name__) log.setLevel(level) # Define our help message class RawFormatter(argparse.HelpFormatter): def _fill_text(self, text, width, indent): return "\n".join( [textwrap.fill(line, width) for line in textwrap.indent(textwrap.dedent(text), indent).splitlines()]) program_descripton = f''' {red}******** {green}ChiaNAS Drive Manager{nc} - {blue}{VERSION}{red} ********{nc} Running drive_manager.py with no arguments causes drive_manager to run in '{yellow}normal{nc}' mode. In this mode {green}drive_manager{nc} will check the drive utilization and update which drive your Chia plots will be sent to when they arrive from your plotter. This is generally called from a cronjob on a regular basis. Please read the full information about how it works on my github page. There are several commandline switches you can use to get immediate reports and feedback: {green}-dr {nc}or{green} --drive_report{nc} {blue}Runs the Daily ChiaNAS Report (if configured), and emails it to you. This can be called from a crontab job as well.{nc} {green}-ct {nc}or{green} --check_temps{blue} This will query all of your hard drives using {yellow}smartctl{blue} and return a list of drive temperatures to you. {green}-pr {nc}or{green} --plot_report{blue} This queries the NAS and returns a report letting you know how many plots are currently on the system and how many more you can add based on the current drive configuration. 
It also includes plotting speed information for the last 24 hours.{nc}

{green}-ud {nc}or{green} --update_daily{blue}      This updates the total number of plots the system has created over the
past 24 hours. Use with {nc}CAUTION!{blue}. This {nc}should{blue} be run from crontab once every 24 hours only! It
updates the total from the last time it was run until now, hence why you should only run this once per 24 hours.{nc}

{green}-off {nc}or{green} --offline_hdd{blue}      This takes a drive as its input (for example {yellow} drive6{blue}) and "{red}offlines{blue}"
it so that no more plots will get written to it. You must {green}--on{blue} or {green}--online_hdd{blue} the drive for it to be
used again. Useful if the drive is failing and needs to be replaced. You cannot "{red}offline{blue}" a drive
that is not mounted.

{green}-on {nc}or{green} --online_hdd{blue}      This takes a drive as its input (for example {yellow} drive6{blue}) and "{green}onlines{blue}"
it so that plots will get written to it. This option will be {nc}UNAVAILABLE{blue} if there are no drives that
have been offlined!{nc}

USAGE:
'''

# Grab command line arguments if there are any
def init_argparser():
    with open('offlined_drives', 'r') as offlined_drives_list:
        offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()]
    parser = argparse.ArgumentParser(description=program_descripton, formatter_class=RawFormatter)
    parser.add_argument('-v', '--version', action='version', version=f'{parser.prog} {VERSION}')
    parser.add_argument('-dr', '--daily_report', action='store_true', help='Run the ChiaPlot Daily Email Report and exit')
    parser.add_argument('-ct', '--check_temps', action='store_true', help='Return a list of drives and their temperatures and exit')
    parser.add_argument('-pr', '--plot_report', action='store_true', help='Return the total # of plots on the system and total you can add and exit')
    parser.add_argument('-ud', '--update_daily', action='store_true', help=f'Updates 24 hour plot count. {red}USE WITH CAUTION, USE WITH CRONTAB{nc}')
    parser.add_argument('-off', '--offline_hdd', action='store', help=f'Offline a specific drive. Use drive number: {green}drive6{nc}')
    if offlined_drives != []:
        parser.add_argument('-on', '--online_hdd', action='store', help='Online a specific drive.', choices=offlined_drives)
    return parser

def get_offlined_drives():
    with open('offlined_drives', 'r') as offlined_drives_list:
        offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()]
    if offlined_drives:
        return offlined_drives
    else:
        return False

# Setup to read and write to our config file.
# If we are expecting a boolean back pass True/1 for bool,
# otherwise False/0
def read_config_data(file, section, item, bool):
    pathname = main_path + file
    config.read(pathname)
    if bool:
        return config.getboolean(section, item)
    else:
        return config.get(section, item)

def update_config_data(file, section, item, value):
    pathname = main_path + file
    config.read(pathname)
    cfgfile = open(pathname, 'w')
    config.set(section, item, value)
    config.write(cfgfile)
    cfgfile.close()

def get_drive_info(action, drive):
    """
    This allows us to query specific information about our drives including
    temperatures, smart assessments, and space available to use for plots.
    It allows us to simply hand it a drive number (drive0, drive22, etc)
    and will present us with the data back. This utilizes pySMART, but a
    word of caution, use the TrueNAS versions linked to above, the PyPI
    version has a bug!
""" if action == 'device': plot_drives = get_list_of_plot_drives() device = [hd for hd in plot_drives if hd[0] == (get_mountpoint_by_drive_number(drive)[0])] if device != []: device = [hd for hd in plot_drives if hd[0] == (get_mountpoint_by_drive_number(drive)[0])] return device[0][1] if action == 'temperature': return Device(get_device_info_by_drive_number(drive)[0][1]).temperature if action == 'capacity': return Device(get_device_info_by_drive_number(drive)[0][1]).capacity if action == 'health': return Device(get_device_info_by_drive_number(drive)[0][1]).assessment if action == 'name': return Device(get_device_info_by_drive_number(drive)[0][1]).name if action == 'serial': return Device(get_device_info_by_drive_number(drive)[0][1]).serial if action == 'space_total': return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[0], 'g')) if action == 'space_used': return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[1], 'g')) if action == 'space_free': return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[2], 'g')) if action == 'space_free_plots': return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[2], 'g') / plot_size_g) if action == 'space_free_plots_by_mountpoint': return int(bytesto(shutil.disk_usage(drive)[2], 'g') / plot_size_g) if action == 'total_current_plots': return int(bytesto(shutil.disk_usage(get_mountpoint_by_drive_number(drive)[0])[1], 'g') / plot_size_g) if action == 'total_current_plots_by_mountpoint': return int(bytesto(shutil.disk_usage(drive)[1], 'g') / plot_size_g) def dev_test(drive): return shutil.disk_usage(drive) #return Device(drive) def get_drive_by_mountpoint(mountpoint): """ This accepts a mountpoint ('/mnt/x/rear/column2/drive32') and returns the drive: drive32 """ return (mountpoint.split("/")[-1]) def get_mountpoint_by_drive_number(drive): """ This accepts a drive number (drive0) and returns the device assignment: /dev/sda1 and mountpoint: /mnt/x0/front/column0/drive0 """ partitions = psutil.disk_partitions(all=False) for p in partitions: if p.device.startswith('/dev/sd') and p.mountpoint.startswith(mount_path) and p.mountpoint.endswith(drive): return [(p.mountpoint)] def get_device_info_by_drive_number(drive): """ This accepts a drive number (drive0) and returns the device assignment: /dev/sda1 and mountpoint """ partitions = psutil.disk_partitions(all=False) for p in partitions: if p.device.startswith('/dev/sd') and p.mountpoint.startswith(mount_path) and p.mountpoint.endswith(drive): return [(p.mountpoint, p.device)] def get_device_by_mountpoint(mountpoint): """ This accepts a mountpoint and returns the device assignment: /dev/sda1 and mountpoint """ partitions = psutil.disk_partitions(all=False) for p in partitions: if p.device.startswith('/dev/sd') and p.mountpoint.startswith(mountpoint): return [(p.mountpoint, p.device)] def get_mountpoint_by_device(device): """ This accepts a mountpoint and returns the device assignment: /dev/sda1 and mountpoint """ partitions = psutil.disk_partitions(all=False) for p in partitions: if p.device.startswith(device): return [(p.mountpoint, p.device)] def get_list_of_plot_drives(): """ Return list of tuples of all available plot drives on the system and the device assignment [('/mnt/x0/front/column0/drive3', '/dev/sde1')] ===> Currently Unused """ partitions = psutil.disk_partitions(all=False) mountpoint = [] for p in partitions: if p.device.startswith('/dev/sd') and p.mountpoint.startswith(mount_path): 
mountpoint.append((p.mountpoint, p.device, p.fstype)) return mountpoint # Thank you: https://gist.github.com/shawnbutts/3906915 def bytesto(bytes, to, bsize=1024): a = {'k': 1, 'm': 2, 'g': 3, 't': 4, 'p': 5, 'e': 6} r = float(bytes) return bytes / (bsize ** a[to]) def get_all_available_system_space(type): """ Return Systems drive space information (total, used and free) based on plot_size """ partitions = psutil.disk_partitions(all=False) drive_space_available = [] for p in partitions: if p.device.startswith('/dev/sd') and p.mountpoint.startswith(mount_path): if type == 'all': drive_space_available.append((p.mountpoint, shutil.disk_usage(p.mountpoint))) if type == 'total': drive_space_available.append(int(bytesto(shutil.disk_usage(p.mountpoint)[0], 'g') / plot_size_g)) if type == 'used': drive_space_available.append(int(bytesto(shutil.disk_usage(p.mountpoint)[1], 'g') / plot_size_g)) if type == 'free': drive_space_available.append(int(bytesto(shutil.disk_usage(p.mountpoint)[2], 'g') / plot_size_g)) return len(drive_space_available), sum(drive_space_available) def get_plot_drive_with_available_space(): """ This looks at all available plot drives that start with /dev/sd and include /mnt/x in the mount path (this covers all of my plot drives), it then looks for any drive that has enough space for at least one plot (k32), sorts that list based on the /dev/sdx sorting and then returns the mountpoint and the device of each drive. ======> Currently Unused <====== """ available_drives = [] for part in psutil.disk_partitions(all=False): if part.device.startswith('/dev/sd') and part.mountpoint.startswith(mount_path) and get_drive_info( 'space_free_plots_by_mountpoint', part.mountpoint) >= 1: available_drives.append((part.mountpoint, part.device)) return (sorted(available_drives, key=lambda x: x[1])) def get_plot_drive_to_use(): """ This looks at all available plot drives that start with /dev/sd and include /mnt/x in the mount path (this covers all of my plot drives), it then looks for any drive that has enough space for at least one plot (k32), sorts that list based on the drive# sorting (drive0, drive10, etc) sorting and then returns the mountpoint of the device we want to use. Basically the same as above but simply returns the 'next' available drive we want to use. This also checks to make sure the drive selected has not been marked as "offline". 
#TODO incorporate in get_plot_drive_with_available_space() """ with open('offlined_drives', 'r') as offlined_drives_list: offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()] available_drives = [] for part in psutil.disk_partitions(all=False): if part.device.startswith('/dev/sd') \ and part.mountpoint.startswith(mount_path) \ and get_drive_info('space_free_plots_by_mountpoint', part.mountpoint) >= 1 \ and get_drive_by_mountpoint(part.mountpoint) not in offlined_drives: drive = get_drive_by_mountpoint(part.mountpoint) available_drives.append((part.mountpoint, part.device, drive)) return (natsorted(available_drives)[0][0]) def get_sorted_drive_list(): """ Returns sorted list of drives """ available_drives = [] for part in psutil.disk_partitions(all=False): if part.device.startswith('/dev/sd') and part.mountpoint.startswith(mount_path): drive=get_drive_by_mountpoint(part.mountpoint) available_drives.append((part.mountpoint, part.device, drive)) return natsorted(available_drives) def get_current_plot_drive_info(): """ Designed for debugging and logging purposes when we switch drives """ return Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature def log_drive_report(): """ Logs a drive report of our newly selected plot drive """ templ = "%-15s %6s %15s %12s %10s %5s" log.info(templ % ("New Plot Drive", "Size", "Avail Plots", "Serial #", "Temp °C", "Mount Point")) usage = psutil.disk_usage(get_device_by_mountpoint(get_plot_drive_to_use())[0][0]) log.info(templ % ( get_device_by_mountpoint(get_plot_drive_to_use())[0][1], bytes2human(usage.total), get_drive_info('space_free_plots_by_mountpoint', (get_plot_drive_to_use())), Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).serial, Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature, get_device_by_mountpoint(get_plot_drive_to_use())[0][0])) def online_offline_drive(drive, onoffline): log.debug(f'online_offline_drive() called with [{drive}] , [{onoffline}]') if get_device_info_by_drive_number(drive) == None: print() print(f'{red}WARNING{nc}: {blue}{drive}{nc} does not exist or is not mounted on this system!') print() log.debug(f'Drive: {drive} does not exist or is not mounted on this system!') else: if onoffline == 'offline': offlined_drives = [] with open('offlined_drives', 'r') as offlined_drives_list: offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()] if drive in offlined_drives: print() print(f'Drive: {blue}{drive}{nc} Already in {red}OFFLINE{nc} mode! No action taken.') print() log.debug(f'Drive: {drive} Already in offline mode!') else: offlined_drives.append(drive) with open('offlined_drives', 'w') as offlined_drive_list: offlined_drive_list.writelines("%s\n" % drives for drives in offlined_drives) print() print(f'Drive: {blue}{drive}{nc} Put into {red}OFFLINE{nc} mode! Plots will not be written to this drive!') print() log.debug(f'Drive: {drive} Put into OFFLINE mode! Plots will not be written to this drive!') elif onoffline == 'online': offlined_drives = [] with open('offlined_drives', 'r') as offlined_drives_list: offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()] if drive in offlined_drives: offlined_drives.remove(drive) with open('offlined_drives', 'w') as offlined_drive_list: offlined_drive_list.writelines("%s\n" % drives for drives in offlined_drives) print() print(f'Drive: {blue}{drive}{nc} Put into {green}ONLINE{nc} mode! 
Plots will now be written to this drive!')
            print()
            log.debug(f'Drive: {drive} Put into ONLINE mode! Plots will now be written to this drive!')
        else:
            print()
            print(f'Drive: {blue}{drive}{nc} was not in {red}OFFLINE{nc} mode! No action taken.')
            print()
            log.debug(f'Drive: {drive} was not offline!')
    elif onoffline == 'check':
        with open('offlined_drives', 'r') as offlined_drives_list:
            offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()]
        if drive in offlined_drives:
            return True
        else:
            return False

def update_receive_plot():
    """
    This utilizes the get_plot_drive_to_use() function and builds out
    our netcat receive_plot.sh script that is called by our plotting
    server when it is ready to send over a new plot. The plotting server
    sends the plot 'in the blind' so-to-speak, this function determines
    what drive the plot will go on and updates the receive shell script
    accordingly. Eventually I will do all of the netcat within the
    script here. See TODO: Update to use netcat native to python.
    """
    log.debug("update_receive_plot() Started")
    total_serverwide_plots = get_all_available_system_space('used')[1]
    log.debug(f'Total Serverwide Plots: {total_serverwide_plots}')
    # First determine if there is a remote file transfer in process. If there is, pass until it is done:
    if os.path.isfile(read_config_data('plot_manager_config', 'remote_transfer', 'remote_transfer_active', False)):
        log.debug('Remote Transfer in Progress, will try again soon!')
        quit()
    else:
        current_plotting_drive = read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False)
        if current_plotting_drive == get_plot_drive_to_use():
            log.debug(f'Currently Configured Plot Drive: {current_plotting_drive}')
            log.debug(f'System Selected Plot Drive: {get_plot_drive_to_use()}')
            log.debug('Configured and Selected Drives Match!')
            log.debug(f'No changes necessary to {receive_script}')
            log.debug(f'Plots left available on configured plotting drive: {get_drive_info("space_free_plots_by_mountpoint", current_plotting_drive)}')
        else:
            # This is the full Plot drive report. This is in addition to the generic email sent by the
            # notify() function.
            send_new_plot_disk_email()
            notify('Plot Drive Updated', f'Plot Drive Updated: Was: {current_plotting_drive}, Now: {get_plot_drive_to_use()}')
            f = open(receive_script, 'w+')
            f.write('#!
/bin/bash \n') f.write(f'nc -l -q5 -p 4040 > "{get_plot_drive_to_use()}/$1" < /dev/null') f.close() update_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', get_plot_drive_to_use()) log.info(f'Updated {receive_script} and system config file with new plot drive.') log.info(f'Was: {current_plotting_drive}, Now: {get_plot_drive_to_use()}') log_drive_report() def send_new_plot_disk_email(): usage = psutil.disk_usage(get_device_by_mountpoint(get_plot_drive_to_use())[0][0]) current_plotting_drive = read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False) if read_config_data('plot_manager_config', 'notifications', 'new_plot_drive', True): for email_address in system_info.alert_email: send_template_email(template='new_plotting_drive.html', recipient=email_address, subject='New Plotting Drive Selected\nContent-Type: text/html', current_time=current_military_time, nas_server=nas_server, previous_plotting_drive=current_plotting_drive, plots_on_previous_plotting_drive=get_drive_info('total_current_plots_by_mountpoint',current_plotting_drive), current_plotting_drive_by_mountpoint=get_plot_drive_to_use(), current_plotting_drive_by_device=get_device_by_mountpoint(get_plot_drive_to_use())[0][1], drive_size=bytes2human(usage.total), plots_available=get_drive_info('space_free_plots_by_mountpoint', (get_plot_drive_to_use())), drive_serial_number=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).serial, current_drive_temperature=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature, smart_health_assessment=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).assessment, total_serverwide_plots=get_all_available_system_space('used')[1], total_serverwide_plots_chia=check_plots()[0], total_serverwide_space_per_chia=check_plots()[1], total_number_of_drives=get_all_available_system_space('total')[0], total_k32_plots_until_full=get_all_available_system_space('free')[1], max_number_of_plots=get_all_available_system_space('total')[1], days_to_fill_drives=(int(get_all_available_system_space('free')[1] / int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False))))) else: pass def send_daily_update_email(): usage = psutil.disk_usage(get_device_by_mountpoint(get_plot_drive_to_use())[0][0]) if read_config_data('plot_manager_config', 'notifications', 'daily_update', True): for email_address in system_info.alert_email: send_template_email(template='daily_update.html', recipient=email_address, subject='NAS Server Daily Update\nContent-Type: text/html', current_time=current_military_time, nas_server=nas_server, current_plotting_drive_by_mountpoint=get_plot_drive_to_use(), current_plotting_drive_by_device=get_device_by_mountpoint(get_plot_drive_to_use())[0][1], drive_size=bytes2human(usage.total), drive_serial_number=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).serial, current_drive_temperature=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature, smart_health_assessment=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).assessment, total_serverwide_plots=get_all_available_system_space('used')[1], total_number_of_drives=get_all_available_system_space('total')[0], total_k32_plots_until_full=get_all_available_system_space('free')[1], max_number_of_plots=get_all_available_system_space('total')[1], total_serverwide_plots_chia=check_plots()[0], total_serverwide_space_per_chia=check_plots()[1], 
total_plots_last_day=read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False), days_to_fill_drives=(int(get_all_available_system_space('free')[1] / int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)))), average_plots_per_hour=round((int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)))/24,1), average_plotting_speed=(int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)) * int(plot_size_g)/1000)) else: pass def space_report(): print('') print(f'{blue}############################################################{nc}') print(f'{blue}################### {green}{nas_server} Plot Report{blue} ##################{nc}' ) print(f'{blue}############################################################{nc}') print (f'Total Number of Plots on {green}{nas_server}{nc}: {yellow}{get_all_available_system_space('used')[1]}{nc}') print (f'Total Number of Plots {green}Chia{nc} is Farming: {yellow}{check_plots()[0]}{nc}') print (f'Total Amount of Drive Space (TiB) {green}Chia{nc} is Farming: {yellow}{check_plots()[1]}{nc}') print (f'Total Number of Systemwide Plot Drives: {yellow}{get_all_available_system_space('total')[0]}{nc}') print (f'Total Number of k32 Plots until full: {yellow}{get_all_available_system_space('free')[1]}{nc}') print (f'Maximum # of plots when full: {yellow}{get_all_available_system_space('total')[1]}{nc}') print (f"Plots completed in the last 24 Hours: {yellow}{int(read_config_data("plot_manager_config", "plotting_information", "current_total_plots_daily", False))}{nc}") print (f"Average Plots per Hour: {yellow}{round((int(read_config_data("plot_manager_config", "plotting_information", "current_total_plots_daily", False)))/24,1)}{nc}") print (f"Average Plotting Speed Last 24 Hours (TiB/Day): {yellow}{round((int(read_config_data("plot_manager_config", "plotting_information", "current_total_plots_daily", False)) * int(plot_size_g)/1000),2)}{nc} ") print(f"Appx Number of Days to fill all current plot drives: {yellow} {int(get_all_available_system_space("free")[1] / int(read_config_data("plot_manager_config", "plotting_information", "current_total_plots_daily", False)))} {nc} ") print (f"Current Plot Storage Drive: {yellow}{(get_device_by_mountpoint(read_config_data("plot_manager_config", "plotting_drives", "current_plotting_drive", False))[0][1])}{nc}") print (f"Temperature of Current Plot Drive: {yellow}{Device((get_device_by_mountpoint(read_config_data("plot_manager_config", "plotting_drives", "current_plotting_drive", False))[0][1])).temperature}°C{nc}") print (f"Latest Smart Drive Assessment of Plot Drive: {yellow}{Device((get_device_by_mountpoint(read_config_data("plot_manager_config", "plotting_drives", "current_plotting_drive", False))[0][1])).assessment}{nc}") print(f'{blue}############################################################{nc}') print('') print('') def temperature_report(): print('') print(f'{blue}#################################################################{nc}') print(f'{blue}################# {green}{nas_server} Temperature Report {blue}##################{nc}') print(f'{blue}#################################################################{nc}') print(f'{blue}# {nc}Serial#{blue} #{nc} Device{blue} #{nc} Drive{blue} #{nc} Temp{blue} #{nc}') print(f'{blue}#################################################################{nc}') for drive in get_sorted_drive_list(): 
print(f'{blue}#{nc} {Device(drive[1]).serial}'f'{blue} #{nc}'f' {drive[1]}{blue} #{nc}' f' {((get_drive_by_mountpoint(drive[0])))}{blue} #{nc}' f' {Device(drive[1]).temperature}°C'f'{blue} #{nc}') print(f'{blue}##################################################################{nc}') print('') print('') # You should run this once per day to see total daily plots # in your reports. If you run it more often, the numbers will # not be correct. I use midnight here for my purposes, but # this is just a var name. def update_daily_plot_counts(): current_total_plots_midnight = int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_midnight', False)) total_serverwide_plots = get_all_available_system_space('used')[1] update_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_midnight', str(total_serverwide_plots)) total_plots_daily = (total_serverwide_plots - current_total_plots_midnight) update_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', str(total_plots_daily)) def send_email(recipient, subject, body): """ Part of our notification system. Setup to send email via the builtin linux mail command. Your local system **must** be configured already to send mail or this will fail. https://stackoverflow.com/questions/27874102/executing-shell-mail-command-using-python https://nedbatchelder.com/text/unipain.html https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-postfix-as-a-send-only-smtp-server-on-ubuntu-20-04 """ try: subprocess.run(['mail', '-s', subject, recipient], input=body, encoding='utf-8', check=True) log.debug(f"Email Notification Sent: Subject: {subject}, Recipient: {recipient}, Message: {body}") except subprocess.CalledProcessError as e: log.debug(f'send_email error: {e}') capture_exception(e) except Exception as e: log.debug(f'send_email: Unknown Error! Email not sent.') capture_exception(e) # Setup to send out Pushbullet alerts. Pushbullet config is in system_info.py def send_push_notification(title, message): """Part of our notification system. This handles sending PushBullets.""" try: pb = Pushbullet(system_info.pushbilletAPI) push = pb.push_note(title, message) log.debug(f"Pushbullet Notification Sent: {title} - {message}") except pb_errors.InvalidKeyError as e: log.debug(f'Pushbullet Exception: Invalid API Key! Message not sent.') capture_exception(e) except Exception as e: log.debug(f'Pushbullet Exception: Unknown Pushbullet Error: {e}. Message not sent.') capture_exception(e) def send_sms_notification(body, phone_number): """Part of our notification system. This handles sending SMS messages.""" try: client = Client(system_info.twilio_account, system_info.twilio_token) message = client.messages.create(to=phone_number, from_=system_info.twilio_from, body=body) log.debug(f"SMS Notification Sent: {body}.") except TwilioRestException as e: log.debug(f'Twilio Exception: {e}. Message not sent.') capture_exception(e) except Exception as e: log.debug(f'Twilio Exception: {e}. 
Message not sent.') capture_exception(e) def notify(title, message): """ Notify system for email, pushbullet and sms (via Twilio)""" log.debug(f'notify() called with Title: {title} and Message: {message}') if (read_config_data('plot_manager_config', 'notifications', 'alerting', True)): if (read_config_data('plot_manager_config', 'notifications', 'pb', True)): send_push_notification(title, message) if (read_config_data('plot_manager_config', 'notifications', 'email', True)): for email_address in system_info.alert_email: send_email(email_address, title, message) if (read_config_data('plot_manager_config', 'notifications', 'sms', True)): for phone_number in system_info.twilio_to: send_sms_notification(message, phone_number) else: pass # Thank You - https://frankcorso.dev/email-html-templates-jinja-python.html def send_template_email(template, recipient, subject, **kwargs): """Sends an email using a jinja template.""" env = Environment( loader=PackageLoader('drive_manager', 'templates'), autoescape=select_autoescape(['html', 'xml']) ) template = env.get_template(template) send_email(recipient, subject, template.render(**kwargs)) # This function is called from crontab. First run the daily update (-ud) then (-dr): # 01 00 * * * /usr/bin/python3 /root/plot_manager/drive_manager.py -ud >/dev/null 2>&1 # 02 00 * * * /usr/bin/python3 /root/plot_manager/drive_manager.py -dr >/dev/null 2>&1 def send_daily_email(): log.debug('send_daily_email() Started') send_daily_update_email() log.info('Daily Update Email Sent!') def send_new_plot_notification(): log.debug('send_new_plot_notification() Started') if os.path.isfile('new_plot_received'): log.debug('New Plot Received') if read_config_data('plot_manager_config', 'notifications', 'per_plot', True): notify('New Plot Received', 'New Plot Received') os.remove('new_plot_received') def check_plots(): with open(chia_log_file, 'rb', 0) as f: m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) i = m.rfind(b'Loaded') m.seek(i) line = m.readline() newline = line.decode("utf-8") x = newline.split() plots = x[4] TiB = float(x[8]) return plots, f'{TiB:.0f}' def main(): parser = init_argparser() args = parser.parse_args() if args.daily_report: send_daily_email() elif args.plot_report: space_report() elif args.update_daily: update_daily_plot_counts() elif args.check_temps: temperature_report() elif args.offline_hdd: online_offline_drive(args.offline_hdd, 'offline') elif get_offlined_drives(): if args.online_hdd: online_offline_drive(args.online_hdd, 'online') else: send_new_plot_notification() update_receive_plot() else: send_new_plot_notification() update_receive_plot() if __name__ == '__main__': main()
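# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not part of the original script): the
# update_receive_plot() docstring above carries a TODO to do the netcat work
# natively in Python instead of the external `nc -l -q5 -p 4040` listener the
# generated receive_plot.sh uses. A minimal standard-library version might look
# like this; the function name receive_plot_natively and its arguments are
# hypothetical.
import socket

def receive_plot_natively(destination_dir, plot_filename, port=4040, chunk=65536):
    """Accept one inbound connection on `port` and stream the plot to disk."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind(('', port))        # listen on all interfaces, like `nc -l -p 4040`
        server.listen(1)
        conn, _addr = server.accept()
        with conn, open(f'{destination_dir}/{plot_filename}', 'wb') as outfile:
            while True:
                data = conn.recv(chunk)
                if not data:           # sender closed the connection; transfer done
                    break
                outfile.write(data)
# ---------------------------------------------------------------------------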
#!/usr/bin/python3 # -*- coding: utf-8 -*- __author__ = 'Richard J. Sears' VERSION = "0.6 (2021-04-22)" ### Simple python script that helps to move my chia plots from my plotter to ### my nas. I wanted to use netcat as it was much faster on my 10GBe link than ### rsync and the servers are secure so I wrote this script to manage that ### move process. It will get better with time as I add in error checking and ### other things like notifications and stuff. # Updates # # V0.6 2021-04-22 # - Check Chia logs and report actual plots being farmed (per Chia) and # total amount of drive space in use (also per Chia). It is not # uncommon for the total number of plots on your system to be slightly # different than what `drive_manager.py` reports due to plot moves, etc # but if there is a large difference, you should check your logs for # signs of other issues. # # V0.5 2021-04-22 # - Updated to support local plot management via `move_local_plots.py` # # V0.4 2021-04-13 # - Added ability to "offline" a drive for maintenance. Before, the script would # select the very first available drive (based on drive number: drive0, drive1) # for plot storage. I ran into a problem when one of my drives kicked out a # smartctl error and I needed to move the plots off of it before it failed. As # soon as I started to move them, drive_manager.py started to fill the drive # back up. So now you can offline a drive and drive_manager will not use it until # you online it again. You still need to go into your chia harvester config # and remove the drive from there. # # # V0.3 2021-04-04 # - Added multiple command line utilities to drive_manager.py including: # * -dr or --drive_report Immediately runs the Daily Report and sends email (if configured) # * -ct or --check_temps Checks the temperatures of all configured plot drives # * -pr or --plot_report Quick plot report like email report but to the screen # * -ud or --update_daily Designed to be called from cron, updates daily plot stats (speed, etc) # Be careful if using it from the command line, it resets your stats. This # should be run once per day from a cronjob. # # - Added plot time information to the daily email report including: # * Total Plots last 24 hours # * Average Plots per Hour (last 24 hours) # * Average Plotting Speed (last 24 hours) # # V0.2 2021-03-23 # - Moved system logging types to plot_manager_config and updated necessary functions. 
# - Added per_plot system notification function (send_new_plot_notification()) # - Updated read_config_data() to support ConfigParser boolean returns # - Updated necessary functions for read_config_data() change import os import sys mount_path = "/mnt/usb/" main_path = "/home/pw/chia_plot_manager/chianas/" sys.path.append(main_path) import subprocess import shutil import psutil from pySMART import Device, DeviceList # CAUTION - DO NOT use PyPI version, use https://github.com/truenas/py-SMART from psutil._common import bytes2human import logging from system_logging import setup_logging from system_logging import read_logging_config import system_info from pushbullet import Pushbullet, errors as pb_errors from twilio.rest import Client from twilio.base.exceptions import TwilioRestException import configparser from jinja2 import Environment, PackageLoader, select_autoescape from datetime import datetime from datetime import timedelta import time config = configparser.ConfigParser() import argparse import textwrap from natsort import natsorted import mmap # Define some colors for our help message red='\033[0;31m' yellow='\033[0;33m' green='\033[0;32m' white='\033[0;37m' blue='\033[0;34m' nc='\033[0m' import sentry_sdk #sentry_sdk.init( # "https://xxxxxxxxxxxxxxxxxxxxxxx.ingest.sentry.io/xxxxxxxxx", # # # Set traces_sample_rate to 1.0 to capture 100% # # of transactions for performance monitoring. # # We recommend adjusting this value in production. # traces_sample_rate=1.0 #) from sentry_sdk import capture_exception # Let's do some housekeeping nas_server = 'chianas01' plot_size_k = 108995911228 plot_size_g = 101.3623551 receive_script = main_path + 'receive_plot.sh' chia_log_file = '/home/pw/.chia/mainnet/log/debug.log' # Date and Time Stuff today = datetime.today().strftime('%A').lower() current_military_time = datetime.now().strftime('%H:%M:%S') current_timestamp = int(time.time()) # Setup Module logging. Main logging is configured in system_logging.py setup_logging() level = read_logging_config('plot_manager_config', 'system_logging', 'log_level') level = logging._checkLevel(level) log = logging.getLogger(__name__) log.setLevel(level) # Define our help message class RawFormatter(argparse.HelpFormatter): def _fill_text(self, text, width, indent): return "\n".join( [textwrap.fill(line, width) for line in textwrap.indent(textwrap.dedent(text), indent).splitlines()]) program_descripton = f''' {red}******** {green}ChiaNAS Drive Manager{nc} - {blue}{VERSION}{red} ********{nc} Running drive_manager.py with no arguments causes drive_manager to run in '{yellow}normal{nc}' mode. In this mode {green}drive_manager{nc} will check the drive utilization and update which drive your Chia plots will be sent to when they arrive from your plotter. This is generally called from a cronjob on a regular basis. Please read the full information about how it works on my github page. There are several commandline switches you can use to get immediate reports and feedback: {green}-dr {nc}or{green} --drive_report{nc} {blue}Runs the Daily ChiaNAS Report (if configured), and emails it to you. This can be called from a crontab job as well.{nc} {green}-ct {nc}or{green} --check_temps{blue} This will query all of your hard drives using {yellow}smartctl{blue} and return a list of drive temperatures to you. {green}-pr {nc}or{green} --plot_report{blue} This queries the NAS and returns a report letting you know how many plots are currently on the system and how many more you can add based on the current drive configuration. 
It also includes plotting speed information for the last 24 hours.{nc} {green}-ud {nc}or{green} --update_daily{blue} This updates the total number of plots the system has created over the past 24 hours. Use with {nc}CAUTION!{blue}. This {nc}should{blue} be run from crontab once every 24 hours only! It updates the total from the last time it was run until now, hence why you should only run this once per 24 hours.{nc} {green}-off {nc}or{green} --offline_hdd{blue} This takes a drive as its input (for example {yellow} drive6{blue}) and "{red}offlines{blue}" it so that no more plots will get written to it. You must {green}--on{blue} or {green}--online_hdd{blue} the drive for it to be used again. Useful if the drive is failing and needs to be replaced. You cannot "{red}offline{blue}" a drive that is not mounted. {green}-on {nc}or{green} --online_hdd{blue} This takes a drive as its input (for example {yellow} drive6{blue}) and "{green}onlines{blue}" it so that plots will get written to it. This option will be {nc}UNAVAILABLE{blue} if there are no drives that have been offlined!{nc} USAGE: ''' # Grab command line arguments if there are any def init_argparser(): with open('offlined_drives', 'r') as offlined_drives_list: offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()] parser = argparse.ArgumentParser(description=program_descripton, formatter_class=RawFormatter) parser.add_argument('-v', '--version', action='version', version=f'{parser.prog} {VERSION}') parser.add_argument('-dr', '--daily_report', action='store_true', help='Run the ChiaPlot Daily Email Report and exit') parser.add_argument('-ct', '--check_temps', action='store_true', help='Return a list of drives and their temperatures and exit') parser.add_argument('-pr', '--plot_report', action='store_true', help='Return the total # of plots on the system and total you can add and exit') parser.add_argument('-ud', '--update_daily', action='store_true', help=f'Updates 24 hour plot count. {red}USE WITH CAUTION, USE WITH CRONTAB{nc}') parser.add_argument('-off', '--offline_hdd', action='store', help=f'Offline a specific drive. Use drive number: {green}drive6{nc}') if offlined_drives: parser.add_argument('-on', '--online_hdd', action='store', help=f'Online a specific drive.' , choices=offlined_drives) return parser def get_offlined_drives(): with open('offlined_drives', 'r') as offlined_drives_list: offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()] if offlined_drives: return offlined_drives else: return False # Setup to read and write to our config file. # If we are expecting a boolean back pass True/1 for bool, # otherwise False/0 def read_config_data(file, section, item, bool): pathname = main_path + file config.read(pathname) if bool: return config.getboolean(section, item) else: return config.get(section, item) def update_config_data(file, section, item, value): pathname = main_path + file config.read(pathname) cfgfile = open(pathname, 'w') config.set(section, item, value) config.write(cfgfile) cfgfile.close() def get_drive_info(action, drive): """ This allows us to query specific information about our drives including temperatures, smart assessments, and space available to use for plots. It allows us to simply hand it a drive number (drive0, drive22, etc) and will present us with the data back. This utilizes pySMART, but a word of caution, use the TrueNAS versions linked to above, the PyPI version has a bug! 
""" if action == 'device': plot_drives = get_list_of_plot_drives() device = [hd for hd in plot_drives if hd[0] == (get_mountpoint_by_drive_number(drive)[0])] if device != []: device = [hd for hd in plot_drives if hd[0] == (get_mountpoint_by_drive_number(drive)[0])] return device[0][1] if action == 'temperature': return Device(get_device_info_by_drive_number(drive)[0][1]).temperature if action == 'capacity': return Device(get_device_info_by_drive_number(drive)[0][1]).capacity if action == 'health': return Device(get_device_info_by_drive_number(drive)[0][1]).assessment if action == 'name': return Device(get_device_info_by_drive_number(drive)[0][1]).name if action == 'serial': return Device(get_device_info_by_drive_number(drive)[0][1]).serial if action == 'space_total': return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[0], 'g')) if action == 'space_used': return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[1], 'g')) if action == 'space_free': return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[2], 'g')) if action == 'space_free_plots': return int(bytesto(shutil.disk_usage(get_device_info_by_drive_number(drive)[0][0])[2], 'g') / plot_size_g) if action == 'space_free_plots_by_mountpoint': return int(bytesto(shutil.disk_usage(drive)[2], 'g') / plot_size_g) if action == 'total_current_plots': return int(bytesto(shutil.disk_usage(get_mountpoint_by_drive_number(drive)[0])[1], 'g') / plot_size_g) if action == 'total_current_plots_by_mountpoint': return int(bytesto(shutil.disk_usage(drive)[1], 'g') / plot_size_g) def dev_test(drive): return shutil.disk_usage(drive) #return Device(drive) def get_drive_by_mountpoint(mountpoint): """ This accepts a mountpoint ('/mnt/x/rear/column2/drive32') and returns the drive: drive32 """ return (mountpoint.split("/")[-1]) def get_mountpoint_by_drive_number(drive): """ This accepts a drive number (drive0) and returns the device assignment: /dev/sda1 and mountpoint: /mnt/x0/front/column0/drive0 """ partitions = psutil.disk_partitions(all=False) for p in partitions: if p.device.startswith('/dev/sd') and p.mountpoint.startswith(mount_path) and p.mountpoint.endswith(drive): return [(p.mountpoint)] def get_device_info_by_drive_number(drive): """ This accepts a drive number (drive0) and returns the device assignment: /dev/sda1 and mountpoint """ partitions = psutil.disk_partitions(all=False) for p in partitions: if p.device.startswith('/dev/sd') and p.mountpoint.startswith(mount_path) and p.mountpoint.endswith(drive): return [(p.mountpoint, p.device)] def get_device_by_mountpoint(mountpoint): """ This accepts a mountpoint and returns the device assignment: /dev/sda1 and mountpoint """ partitions = psutil.disk_partitions(all=False) for p in partitions: if p.device.startswith('/dev/sd') and p.mountpoint.startswith(mountpoint): return [(p.mountpoint, p.device)] def get_mountpoint_by_device(device): """ This accepts a mountpoint and returns the device assignment: /dev/sda1 and mountpoint """ partitions = psutil.disk_partitions(all=False) for p in partitions: if p.device.startswith(device): return [(p.mountpoint, p.device)] def get_list_of_plot_drives(): """ Return list of tuples of all available plot drives on the system and the device assignment [('/mnt/x0/front/column0/drive3', '/dev/sde1')] ===> Currently Unused """ partitions = psutil.disk_partitions(all=False) mountpoint = [] for p in partitions: if p.device.startswith('/dev/sd') and p.mountpoint.startswith(mount_path): 
mountpoint.append((p.mountpoint, p.device, p.fstype)) return mountpoint # Thank you: https://gist.github.com/shawnbutts/3906915 def bytesto(bytes, to, bsize=1024): a = {'k': 1, 'm': 2, 'g': 3, 't': 4, 'p': 5, 'e': 6} r = float(bytes) return r / (bsize ** a[to]) def get_all_available_system_space(type): """ Return the system's drive space information (total, used and free) based on plot_size """ partitions = psutil.disk_partitions(all=False) drive_space_available = [] for p in partitions: if p.device.startswith('/dev/sd') and p.mountpoint.startswith(mount_path): if type == 'all': drive_space_available.append((p.mountpoint, shutil.disk_usage(p.mountpoint))) if type == 'total': drive_space_available.append(int(bytesto(shutil.disk_usage(p.mountpoint)[0], 'g') / plot_size_g)) if type == 'used': drive_space_available.append(int(bytesto(shutil.disk_usage(p.mountpoint)[1], 'g') / plot_size_g)) if type == 'free': drive_space_available.append(int(bytesto(shutil.disk_usage(p.mountpoint)[2], 'g') / plot_size_g)) return len(drive_space_available), sum(drive_space_available) def get_plot_drive_with_available_space(): """ This looks at all available plot drives that start with /dev/sd and include /mnt/x in the mount path (this covers all of my plot drives), it then looks for any drive that has enough space for at least one plot (k32), sorts that list based on the /dev/sdx sorting and then returns the mountpoint and the device of each drive. ======> Currently Unused <====== """ available_drives = [] for part in psutil.disk_partitions(all=False): if part.device.startswith('/dev/sd') and part.mountpoint.startswith(mount_path) and get_drive_info( 'space_free_plots_by_mountpoint', part.mountpoint) >= 1: available_drives.append((part.mountpoint, part.device)) return (sorted(available_drives, key=lambda x: x[1])) def get_plot_drive_to_use(): """ This looks at all available plot drives that start with /dev/sd and include /mnt/x in the mount path (this covers all of my plot drives), it then looks for any drive that has enough space for at least one plot (k32), sorts that list based on the drive# (drive0, drive10, etc) sorting and then returns the mountpoint of the device we want to use. Basically the same as above but simply returns the 'next' available drive we want to use. This also checks to make sure the drive selected has not been marked as "offline". 
#TODO incorporate in get_plot_drive_with_available_space() """ with open('offlined_drives', 'r') as offlined_drives_list: offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()] available_drives = [] for part in psutil.disk_partitions(all=False): if part.device.startswith('/dev/sd') \ and part.mountpoint.startswith(mount_path) \ and get_drive_info('space_free_plots_by_mountpoint', part.mountpoint) >= 1 \ and get_drive_by_mountpoint(part.mountpoint) not in offlined_drives: drive = get_drive_by_mountpoint(part.mountpoint) available_drives.append((part.mountpoint, part.device, drive)) return (natsorted(available_drives)[0][0]) def get_sorted_drive_list(): """ Returns sorted list of drives """ available_drives = [] for part in psutil.disk_partitions(all=False): if part.device.startswith('/dev/sd') and part.mountpoint.startswith(mount_path): drive=get_drive_by_mountpoint(part.mountpoint) available_drives.append((part.mountpoint, part.device, drive)) return natsorted(available_drives) def get_current_plot_drive_info(): """ Designed for debugging and logging purposes when we switch drives """ return Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature def log_drive_report(): """ Logs a drive report of our newly selected plot drive """ templ = "%-15s %6s %15s %12s %10s %5s" log.info(templ % ("New Plot Drive", "Size", "Avail Plots", "Serial #", "Temp °C", "Mount Point")) usage = psutil.disk_usage(get_device_by_mountpoint(get_plot_drive_to_use())[0][0]) log.info(templ % ( get_device_by_mountpoint(get_plot_drive_to_use())[0][1], bytes2human(usage.total), get_drive_info('space_free_plots_by_mountpoint', (get_plot_drive_to_use())), Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).serial, Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature, get_device_by_mountpoint(get_plot_drive_to_use())[0][0])) def online_offline_drive(drive, onoffline): log.debug(f'online_offline_drive() called with [{drive}] , [{onoffline}]') if get_device_info_by_drive_number(drive) == None: print() print(f'{red}WARNING{nc}: {blue}{drive}{nc} does not exist or is not mounted on this system!') print() log.debug(f'Drive: {drive} does not exist or is not mounted on this system!') else: if onoffline == 'offline': offlined_drives = [] with open('offlined_drives', 'r') as offlined_drives_list: offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()] if drive in offlined_drives: print() print(f'Drive: {blue}{drive}{nc} Already in {red}OFFLINE{nc} mode! No action taken.') print() log.debug(f'Drive: {drive} Already in offline mode!') else: offlined_drives.append(drive) with open('offlined_drives', 'w') as offlined_drive_list: offlined_drive_list.writelines("%s\n" % drives for drives in offlined_drives) print() print(f'Drive: {blue}{drive}{nc} Put into {red}OFFLINE{nc} mode! Plots will not be written to this drive!') print() log.debug(f'Drive: {drive} Put into OFFLINE mode! Plots will not be written to this drive!') elif onoffline == 'online': offlined_drives = [] with open('offlined_drives', 'r') as offlined_drives_list: offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()] if drive in offlined_drives: offlined_drives.remove(drive) with open('offlined_drives', 'w') as offlined_drive_list: offlined_drive_list.writelines("%s\n" % drives for drives in offlined_drives) print() print(f'Drive: {blue}{drive}{nc} Put into {green}ONLINE{nc} mode! 
Plots will now be written to this drive!') print() log.debug(f'Drive: {drive} Put into ONLINE mode! Plots will now be written to this drive!') else: print() print(f'Drive: {blue}{drive}{nc} was not in {red}OFFLINE{nc} mode! No action taken.') print() log.debug(f'Drive: {drive} was not offline!') elif onoffline == 'check': with open('offlined_drives', 'r') as offlined_drives_list: offlined_drives = [current_drives.rstrip() for current_drives in offlined_drives_list.readlines()] if drive in offlined_drives: return True else: return False def update_receive_plot(): """ This utilizes the get_plot_drive_to_use() function and builds out our netcat receive_plot.sh script that is called by our plotting server when it is ready to send over a new plot. The plotting server sends the plot 'in the blind' so-to-speak, this function determines what drive the plot will go on and updates the receive shell script accordingly. Eventually I will do all of the netcat within the script here. See TODO: Update to use netcat native to python. """ log.debug("update_receive_plot() Started") total_serverwide_plots = get_all_available_system_space('used')[1] log.debug(f'Total Serverwide Plots: {total_serverwide_plots}') # First determine if there is a remote file transfer in process. If there is, pass until it is done: if os.path.isfile(read_config_data('plot_manager_config', 'remote_transfer', 'remote_transfer_active', False)): log.debug('Remote Transfer in Progress, will try again soon!') quit() else: current_plotting_drive = read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False) if current_plotting_drive == get_plot_drive_to_use(): log.debug(f'Currently Configured Plot Drive: {current_plotting_drive}') log.debug(f'System Selected Plot Drive: {get_plot_drive_to_use()}') log.debug('Configured and Selected Drives Match!') log.debug(f'No changes necessary to {receive_script}') log.debug( f'Plots left available on configured plotting drive: {get_drive_info("space_free_plots_by_mountpoint", current_plotting_drive)}') else: send_new_plot_disk_email() # This is the full Plot drive report. This is in addition to the generic email sent by the # notify() function. notify('Plot Drive Updated', f'Plot Drive Updated: Was: {current_plotting_drive}, Now: {get_plot_drive_to_use()}') f = open(receive_script, 'w+') f.write('#! 
/bin/bash \n') f.write(f'nc -l -q5 -p 4040 > "{get_plot_drive_to_use()}/$1" < /dev/null') f.close() update_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', get_plot_drive_to_use()) log.info(f'Updated {receive_script} and system config file with new plot drive.') log.info(f'Was: {current_plotting_drive}, Now: {get_plot_drive_to_use()}') log_drive_report() def send_new_plot_disk_email(): usage = psutil.disk_usage(get_device_by_mountpoint(get_plot_drive_to_use())[0][0]) current_plotting_drive = read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False) if read_config_data('plot_manager_config', 'notifications', 'new_plot_drive', True): for email_address in system_info.alert_email: send_template_email(template='new_plotting_drive.html', recipient=email_address, subject='New Plotting Drive Selected\nContent-Type: text/html', current_time=current_military_time, nas_server=nas_server, previous_plotting_drive=current_plotting_drive, plots_on_previous_plotting_drive=get_drive_info('total_current_plots_by_mountpoint',current_plotting_drive), current_plotting_drive_by_mountpoint=get_plot_drive_to_use(), current_plotting_drive_by_device=get_device_by_mountpoint(get_plot_drive_to_use())[0][1], drive_size=bytes2human(usage.total), plots_available=get_drive_info('space_free_plots_by_mountpoint', (get_plot_drive_to_use())), drive_serial_number=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).serial, current_drive_temperature=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature, smart_health_assessment=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).assessment, total_serverwide_plots=get_all_available_system_space('used')[1], total_serverwide_plots_chia=check_plots()[0], total_serverwide_space_per_chia=check_plots()[1], total_number_of_drives=get_all_available_system_space('total')[0], total_k32_plots_until_full=get_all_available_system_space('free')[1], max_number_of_plots=get_all_available_system_space('total')[1], days_to_fill_drives=(int(get_all_available_system_space('free')[1] / int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False))))) else: pass def send_daily_update_email(): usage = psutil.disk_usage(get_device_by_mountpoint(get_plot_drive_to_use())[0][0]) if read_config_data('plot_manager_config', 'notifications', 'daily_update', True): for email_address in system_info.alert_email: send_template_email(template='daily_update.html', recipient=email_address, subject='NAS Server Daily Update\nContent-Type: text/html', current_time=current_military_time, nas_server=nas_server, current_plotting_drive_by_mountpoint=get_plot_drive_to_use(), current_plotting_drive_by_device=get_device_by_mountpoint(get_plot_drive_to_use())[0][1], drive_size=bytes2human(usage.total), drive_serial_number=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).serial, current_drive_temperature=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).temperature, smart_health_assessment=Device(get_device_by_mountpoint(get_plot_drive_to_use())[0][1]).assessment, total_serverwide_plots=get_all_available_system_space('used')[1], total_number_of_drives=get_all_available_system_space('total')[0], total_k32_plots_until_full=get_all_available_system_space('free')[1], max_number_of_plots=get_all_available_system_space('total')[1], total_serverwide_plots_chia=check_plots()[0], total_serverwide_space_per_chia=check_plots()[1], 
total_plots_last_day=read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False), days_to_fill_drives=(int(get_all_available_system_space('free')[1] / int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)))), average_plots_per_hour=round((int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)))/24,1), average_plotting_speed=(int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)) * int(plot_size_g)/1000)) else: pass def space_report(): print('') print(f'{blue}############################################################{nc}') print(f'{blue}################### {green}{nas_server} Plot Report{blue} ##################{nc}' ) print(f'{blue}############################################################{nc}') print (f'Total Number of Plots on {green}{nas_server}{nc}: {yellow}{get_all_available_system_space("used")[1]}{nc}') print (f'Total Number of Plots {green}Chia{nc} is Farming: {yellow}{check_plots()[0]}{nc}') print (f'Total Amount of Drive Space (TiB) {green}Chia{nc} is Farming: {yellow}{check_plots()[1]}{nc}') print (f'Total Number of Systemwide Plot Drives: {yellow}{get_all_available_system_space("total")[0]}{nc}') print (f'Total Number of k32 Plots until full: {yellow}{get_all_available_system_space("free")[1]}{nc}') print (f'Maximum # of plots when full: {yellow}{get_all_available_system_space("total")[1]}{nc}') print (f"Plots completed in the last 24 Hours: {yellow}{int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False))}{nc}") print (f"Average Plots per Hour: {yellow}{round((int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)))/24,1)}{nc}") print (f"Average Plotting Speed Last 24 Hours (TiB/Day): {yellow}{round((int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)) * int(plot_size_g)/1000),2)}{nc} ") print(f"Appx Number of Days to fill all current plot drives: {yellow} {int(get_all_available_system_space('free')[1] / int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', False)))} {nc} ") print (f"Current Plot Storage Drive: {yellow}{(get_device_by_mountpoint(read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False))[0][1])}{nc}") print (f"Temperature of Current Plot Drive: {yellow}{Device((get_device_by_mountpoint(read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False))[0][1])).temperature}°C{nc}") print (f"Latest Smart Drive Assessment of Plot Drive: {yellow}{Device((get_device_by_mountpoint(read_config_data('plot_manager_config', 'plotting_drives', 'current_plotting_drive', False))[0][1])).assessment}{nc}") print(f'{blue}############################################################{nc}') print('') print('') def temperature_report(): print('') print(f'{blue}#################################################################{nc}') print(f'{blue}################# {green}{nas_server} Temperature Report {blue}##################{nc}') print(f'{blue}#################################################################{nc}') print(f'{blue}# {nc}Serial#{blue} #{nc} Device{blue} #{nc} Drive{blue} #{nc} Temp{blue} #{nc}') print(f'{blue}#################################################################{nc}') for drive in get_sorted_drive_list(): 
print(f'{blue}#{nc} {Device(drive[1]).serial}'f'{blue} #{nc}'f' {drive[1]}{blue} #{nc}' f' {((get_drive_by_mountpoint(drive[0])))}{blue} #{nc}' f' {Device(drive[1]).temperature}°C'f'{blue} #{nc}') print(f'{blue}##################################################################{nc}') print('') print('') # You should run this once per day to see total daily plots # in your reports. If you run it more often, the numbers will # not be correct. I use midnight here for my purposes, but # this is just a var name. def update_daily_plot_counts(): current_total_plots_midnight = int(read_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_midnight', False)) total_serverwide_plots = get_all_available_system_space('used')[1] update_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_midnight', str(total_serverwide_plots)) total_plots_daily = (total_serverwide_plots - current_total_plots_midnight) update_config_data('plot_manager_config', 'plotting_information', 'current_total_plots_daily', str(total_plots_daily)) def send_email(recipient, subject, body): """ Part of our notification system. Setup to send email via the builtin linux mail command. Your local system **must** be configured already to send mail or this will fail. https://stackoverflow.com/questions/27874102/executing-shell-mail-command-using-python https://nedbatchelder.com/text/unipain.html https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-postfix-as-a-send-only-smtp-server-on-ubuntu-20-04 """ try: subprocess.run(['mail', '-s', subject, recipient], input=body, encoding='utf-8', check=True) log.debug(f"Email Notification Sent: Subject: {subject}, Recipient: {recipient}, Message: {body}") except subprocess.CalledProcessError as e: log.debug(f'send_email error: {e}') capture_exception(e) except Exception as e: log.debug(f'send_email: Unknown Error! Email not sent.') capture_exception(e) # Setup to send out Pushbullet alerts. Pushbullet config is in system_info.py def send_push_notification(title, message): """Part of our notification system. This handles sending PushBullets.""" try: pb = Pushbullet(system_info.pushbilletAPI) push = pb.push_note(title, message) log.debug(f"Pushbullet Notification Sent: {title} - {message}") except pb_errors.InvalidKeyError as e: log.debug(f'Pushbullet Exception: Invalid API Key! Message not sent.') capture_exception(e) except Exception as e: log.debug(f'Pushbullet Exception: Unknown Pushbullet Error: {e}. Message not sent.') capture_exception(e) def send_sms_notification(body, phone_number): """Part of our notification system. This handles sending SMS messages.""" try: client = Client(system_info.twilio_account, system_info.twilio_token) message = client.messages.create(to=phone_number, from_=system_info.twilio_from, body=body) log.debug(f"SMS Notification Sent: {body}.") except TwilioRestException as e: log.debug(f'Twilio Exception: {e}. Message not sent.') capture_exception(e) except Exception as e: log.debug(f'Twilio Exception: {e}. 
Message not sent.') capture_exception(e) def notify(title, message): """ Notify system for email, pushbullet and sms (via Twilio)""" log.debug(f'notify() called with Title: {title} and Message: {message}') if (read_config_data('plot_manager_config', 'notifications', 'alerting', True)): if (read_config_data('plot_manager_config', 'notifications', 'pb', True)): send_push_notification(title, message) if (read_config_data('plot_manager_config', 'notifications', 'email', True)): for email_address in system_info.alert_email: send_email(email_address, title, message) if (read_config_data('plot_manager_config', 'notifications', 'sms', True)): for phone_number in system_info.twilio_to: send_sms_notification(message, phone_number) else: pass # Thank You - https://frankcorso.dev/email-html-templates-jinja-python.html def send_template_email(template, recipient, subject, **kwargs): """Sends an email using a jinja template.""" env = Environment( loader=PackageLoader('drive_manager', 'templates'), autoescape=select_autoescape(['html', 'xml']) ) template = env.get_template(template) send_email(recipient, subject, template.render(**kwargs)) # This function is called from crontab. First run the daily update (-ud) then (-dr): # 01 00 * * * /usr/bin/python3 /root/plot_manager/drive_manager.py -ud >/dev/null 2>&1 # 02 00 * * * /usr/bin/python3 /root/plot_manager/drive_manager.py -dr >/dev/null 2>&1 def send_daily_email(): log.debug('send_daily_email() Started') send_daily_update_email() log.info('Daily Update Email Sent!') def send_new_plot_notification(): log.debug('send_new_plot_notification() Started') if os.path.isfile('new_plot_received'): log.debug('New Plot Received') if read_config_data('plot_manager_config', 'notifications', 'per_plot', True): notify('New Plot Received', 'New Plot Received') os.remove('new_plot_received') def check_plots(): with open(chia_log_file, 'rb', 0) as f: m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) i = m.rfind(b'Loaded') m.seek(i) line = m.readline() newline = line.decode("utf-8") x = newline.split() plots = x[4] TiB = float(x[8]) return plots, f'{TiB:.0f}' def main(): parser = init_argparser() args = parser.parse_args() if args.daily_report: send_daily_email() elif args.plot_report: space_report() elif args.update_daily: update_daily_plot_counts() elif args.check_temps: temperature_report() elif args.offline_hdd: online_offline_drive(args.offline_hdd, 'offline') elif get_offlined_drives(): if args.online_hdd: online_offline_drive(args.online_hdd, 'online') else: send_new_plot_notification() update_receive_plot() else: send_new_plot_notification() update_receive_plot() if __name__ == '__main__': main()
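# ---------------------------------------------------------------------------
# Worked example (illustrative only): the plot-capacity arithmetic used by
# bytesto(), get_drive_info() and get_all_available_system_space() above
# converts free bytes to GiB and divides by plot_size_g (101.3623551 GiB per
# k32 plot). The mountpoint below is hypothetical.
import shutil

plot_size_g = 101.3623551                                # GiB per k32 plot, from above
free_bytes = shutil.disk_usage('/mnt/usb/drive0').free   # hypothetical plot drive
free_gib = free_bytes / (1024 ** 3)                      # same conversion bytesto(..., 'g') performs
print(f"k32 plots that still fit: {int(free_gib / plot_size_g)}")
# e.g. 1013 GiB free / 101.3623551 GiB per plot -> int(9.99) == 9 more plots
# ---------------------------------------------------------------------------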
import abc import textwrap import warnings from copy import deepcopy from collections import namedtuple from collections.abc import Mapping import astropy.nddata import astropy.units as u import numpy as np from astropy.units import UnitsError try: # Import sunpy coordinates if available to register the frames and WCS functions with astropy import sunpy.coordinates # pylint: disable=unused-import # NOQA except ImportError: pass from astropy.wcs import WCS from astropy.wcs.utils import _split_matrix from astropy.wcs.wcsapi import BaseHighLevelWCS, HighLevelWCSWrapper from ndcube import utils from ndcube.extra_coords import ExtraCoords from ndcube.global_coords import GlobalCoords from ndcube.mixins import NDCubeSlicingMixin from ndcube.ndcube_sequence import NDCubeSequence from ndcube.utils.wcs_high_level_conversion import values_to_high_level_objects from ndcube.visualization import PlotterDescriptor from ndcube.wcs.wrappers import CompoundLowLevelWCS __all__ = ['NDCubeABC', 'NDCubeBase', 'NDCube'] class NDCubeABC(astropy.nddata.NDData, metaclass=abc.ABCMeta): @abc.abstractproperty def dimensions(self): """ The array dimensions of the cube. """ @abc.abstractmethod def crop(self, *points, wcs=None): """ Crop to the smallest cube in pixel space containing the world coordinate points. Parameters ---------- points: iterable of iterables Tuples of high level coordinate objects e.g. `~astropy.coordinates.SkyCoord`. The coordinates of the points **must be specified in Cartesian (WCS) order** as they are passed to `~astropy.wcs.wcsapi.BaseHighLevelWCS.world_to_array_index`. Therefore their number and order must be compatible with the API of that method. It is possible to not specify a coordinate for an axis by replacing any object with `None`. Any coordinate replaced by `None` will not be used to calculate pixel coordinates, and therefore not affect the calculation of the final bounding box. wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS` The WCS to use to calculate the pixel coordinates based on the input. Will default to the ``.wcs`` property if not given. While any valid WCS could be used it is expected that either the ``.wcs``, ``.combined_wcs``, or ``.extra_coords`` properties will be used. Returns ------- result: `ndcube.NDCube` """ @abc.abstractmethod def crop_by_values(self, *points, units=None, wcs=None): """ Crop to the smallest cube in pixel space containing the world coordinate points. Parameters ---------- points: iterable of iterables Tuples of coordinates as `~astropy.units.Quantity` objects. The coordinates of the points **must be specified in Cartesian (WCS) order** as they are passed to `~astropy.wcs.wcsapi.BaseHighLevelWCS.world_to_array_index_values`. Therefore their number and order must be compatible with the API of that method. It is possible to not specify a coordinate for an axis by replacing any coordinate with `None`. Any coordinate replaced by `None` will not be used to calculate pixel coordinates, and therefore not affect the calculation of the final bounding box. Note that you must specify either none or all coordinates for any correlated axes, e.g. both spatial coordinates. units: iterable of `astropy.units.Unit` The unit of the corresponding entries in each point. Must therefore be the same length as the number of world axes. Only used if the corresponding type is not a `astropy.units.Quantity` or `None`. wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS` The WCS to use to calculate the pixel coordinates based on the input. Will default to the ``.wcs`` property if not given. 
While any valid WCS could be used it is expected that either the ``.wcs``, ``.combined_wcs``, or ``.extra_coords`` properties will be used. Returns ------- result: `ndcube.NDCube` """ class NDCubeLinkedDescriptor: """ A descriptor which gives the property a reference to the cube to which it is attached. """ def __init__(self, default_type): self._default_type = default_type self._property_name = None def __set_name__(self, owner, name): """ This function is called when the class the descriptor is attached to is initialized. The *class* and not the instance. """ # property name is the name of the attribute on the parent class # pointing at an instance of this descriptor. self._property_name = name # attribute name is the name of the attribute on the parent class where # the data is stored. self._attribute_name = f"_{name}" def __get__(self, obj, objtype=None): if obj is None: return if getattr(obj, self._attribute_name, None) is None and self._default_type is not None: self.__set__(obj, self._default_type) return getattr(obj, self._attribute_name) def __set__(self, obj, value): if isinstance(value, self._default_type): value._ndcube = obj elif issubclass(value, self._default_type): value = value(obj) else: raise ValueError( f"Unable to set value for {self._property_name} it should " f"be an instance or subclass of {self._default_type}") setattr(obj, self._attribute_name, value) class NDCubeBase(NDCubeSlicingMixin, NDCubeABC): """ Class representing N-D data described by a single array and set of WCS transformations. Parameters ---------- data: array-like or `astropy.nddata.NDData` The array holding the actual data in this object. wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS`, `astropy.wcs.wcsapi.BaseHighLevelWCS`, optional The WCS object containing the axes' information, optional only if ``data`` is an `astropy.nddata.NDData` object. uncertainty : any type, optional Uncertainty in the dataset. Should have an attribute uncertainty_type that defines what kind of uncertainty is stored, for example "std" for standard deviation or "var" for variance. A metaclass defining such an interface is `~astropy.nddata.NDUncertainty` - but isn’t mandatory. If the uncertainty has no such attribute the uncertainty is stored as `~astropy.nddata.UnknownUncertainty`. Defaults to None. mask : any type, optional Mask for the dataset. Masks should follow the numpy convention that valid data points are marked by `False` and invalid ones with `True`. Defaults to `None`. meta : dict-like object, optional Additional meta information about the dataset. If no meta is provided an empty dictionary is created. unit : Unit-like or `str`, optional Unit for the dataset. Strings that can be converted to a `~astropy.unit.Unit` are allowed. Default is `None` which results in dimensionless units. copy : bool, optional Indicates whether to save the arguments as copy. `True` copies every attribute before saving it while `False` tries to save every parameter as reference. Note however that it is not always possible to save the input as reference. Default is `False`. """ # Instances of Extra and Global coords are managed through descriptors _extra_coords = NDCubeLinkedDescriptor(ExtraCoords) _global_coords = NDCubeLinkedDescriptor(GlobalCoords) def __init__(self, data, wcs=None, uncertainty=None, mask=None, meta=None, unit=None, copy=False, **kwargs): super().__init__(data, uncertainty=uncertainty, mask=mask, meta=meta, unit=unit, copy=copy, **kwargs) if not self.wcs: self.wcs = wcs # This line is required as a patch for an astropy bug. 
# Above line is in if statement to prevent WCS being overwritten with None # if we are instantiating from an NDCube. # Enforce that the WCS object is not None if self.wcs is None: raise TypeError("The WCS argument can not be None.") # Get existing extra_coords if initializing from an NDCube if hasattr(data, "extra_coords"): extra_coords = data.extra_coords if copy: extra_coords = deepcopy(extra_coords) self._extra_coords = extra_coords # Get existing global_coords if initializing from an NDCube if hasattr(data, "global_coords"): global_coords = data._global_coords if copy: global_coords = deepcopy(global_coords) self._global_coords = global_coords @property def extra_coords(self): """ An `.ExtraCoords` object holding extra coordinates aligned to array axes. """ return self._extra_coords @property def global_coords(self): """ A `.GlobalCoords` object holding coordinate metadata not aligned to an array axis. """ return self._global_coords @property def combined_wcs(self): """ A `~astropy.wcs.wcsapi.BaseHighLevelWCS` object which combines ``.wcs`` with ``.extra_coords``. """ if not self.extra_coords.wcs: return self.wcs mapping = list(range(self.wcs.pixel_n_dim)) + list(self.extra_coords.mapping) return HighLevelWCSWrapper( CompoundLowLevelWCS(self.wcs.low_level_wcs, self._extra_coords.wcs, mapping=mapping) ) @property def dimensions(self): return u.Quantity(self.data.shape, unit=u.pix) @property def array_axis_physical_types(self): """ Returns the physical types associated with each array axis. Returns an iterable of tuples where each tuple corresponds to an array axis and holds strings denoting the physical types associated with that array axis. Since multiple physical types can be associated with one array axis, tuples can be of different lengths. Likewise, as a single physical type can correspond to multiple array axes, the same physical type string can appear in multiple tuples. The physical types are drawn from the WCS ExtraCoords objects. """ wcs = self.combined_wcs world_axis_physical_types = np.array(wcs.world_axis_physical_types) axis_correlation_matrix = wcs.axis_correlation_matrix return [tuple(world_axis_physical_types[axis_correlation_matrix[:, i]]) for i in range(axis_correlation_matrix.shape[1])][::-1] def _generate_world_coords(self, pixel_corners, wcs): # TODO: We can improve this by not always generating all coordinates # To make our lives easier here we generate all the coordinates for all # pixels and then choose the ones we want to return to the user based # on the axes argument. We could be smarter by integrating this logic # into the main loop, this would potentially reduce the number of calls # to pixel_to_world_values # Create meshgrid of all pixel coordinates. # If user, wants pixel_corners, set pixel values to pixel pixel_corners. # Else make pixel centers. 
pixel_shape = self.data.shape[::-1] if pixel_corners: pixel_shape = tuple(np.array(pixel_shape) + 1) ranges = [np.arange(i) - 0.5 for i in pixel_shape] else: ranges = [np.arange(i) for i in pixel_shape] # Limit the pixel dimensions to the ones present in the ExtraCoords if isinstance(wcs, ExtraCoords): ranges = [ranges[i] for i in wcs.mapping] wcs = wcs.wcs if wcs is None: return [] world_coords = [None] * wcs.world_n_dim for (pixel_axes_indices, world_axes_indices) in _split_matrix(wcs.axis_correlation_matrix): # First construct a range of pixel indices for this set of coupled dimensions sub_range = [ranges[idx] for idx in pixel_axes_indices] # Then get a set of non correlated dimensions non_corr_axes = set(list(range(wcs.pixel_n_dim))) - set(pixel_axes_indices) # And inject 0s for those coordinates for idx in non_corr_axes: sub_range.insert(idx, 0) # Generate a grid of broadcastable pixel indices for all pixel dimensions grid = np.meshgrid(*sub_range, indexing='ij') # Convert to world coordinates world = wcs.pixel_to_world_values(*grid) # TODO: this isinstance check is to mitigate https://github.com/spacetelescope/gwcs/pull/332 if wcs.world_n_dim == 1 and not isinstance(world, tuple): world = [world] # Extract the world coordinates of interest and remove any non-correlated axes # Transpose the world coordinates so they match array ordering not pixel for idx in world_axes_indices: array_slice = np.zeros((wcs.pixel_n_dim,), dtype=object) array_slice[wcs.axis_correlation_matrix[idx]] = slice(None) tmp_world = world[idx][tuple(array_slice)].T world_coords[idx] = tmp_world for i, (coord, unit) in enumerate(zip(world_coords, wcs.world_axis_units)): world_coords[i] = coord << u.Unit(unit) return world_coords @utils.cube.sanitize_wcs def axis_world_coords(self, *axes, pixel_corners=False, wcs=None): """ Returns WCS coordinate values of all pixels for all axes. Parameters ---------- axes: `int` or `str`, or multiple `int` or `str`, optional Axis number in numpy ordering or unique substring of `~ndcube.NDCube.world_axis_physical_types` of axes for which real world coordinates are desired. axes=None implies all axes will be returned. pixel_corners: `bool`, optional If `True` then instead of returning the coordinates at the centers of the pixels, the coordinates at the pixel corners will be returned. This increases the size of the output by 1 in all dimensions as all corners are returned. wcs: `astropy.wcs.wcsapi.BaseHighLevelWCS`, optional The WCS object to use to calculate the world coordinates. Although technically this can be any valid WCS, it will typically be ``self.wcs``, ``self.extra_coords``, or ``self.combined_wcs`` which combines both the WCS and extra coords. Defaults to the ``.wcs`` property. Returns ------- axes_coords: `list` An iterable of "high level" objects giving the real world coords for the axes requested by user. For example, a tuple of `~astropy.coordinates.SkyCoord` objects. The types returned are determined by the WCS object. The dimensionality of these objects should match that of their corresponding array dimensions, unless ``pixel_corners=True`` in which case the length along each axis will be 1 greater than the number of pixels. 
Example ------- >>> NDCube.axis_world_coords(('lat', 'lon')) # doctest: +SKIP >>> NDCube.axis_world_coords(2) # doctest: +SKIP """ if isinstance(wcs, BaseHighLevelWCS): wcs = wcs.low_level_wcs axes_coords = self._generate_world_coords(pixel_corners, wcs) if isinstance(wcs, ExtraCoords): wcs = wcs.wcs if not wcs: return tuple() axes_coords = values_to_high_level_objects(*axes_coords, low_level_wcs=wcs) if not axes: return tuple(axes_coords) object_names = np.array([wao_comp[0] for wao_comp in wcs.world_axis_object_components]) unique_obj_names = utils.misc.unique_sorted(object_names) world_axes_for_obj = [np.where(object_names == name)[0] for name in unique_obj_names] # Create a mapping from world index in the WCS to object index in axes_coords world_index_to_object_index = {} for object_index, world_axes in enumerate(world_axes_for_obj): for world_index in world_axes: world_index_to_object_index[world_index] = object_index world_indices = utils.wcs.calculate_world_indices_from_axes(wcs, axes) object_indices = utils.misc.unique_sorted( [world_index_to_object_index[world_index] for world_index in world_indices] ) return tuple(axes_coords[i] for i in object_indices) @utils.cube.sanitize_wcs def axis_world_coords_values(self, *axes, pixel_corners=False, wcs=None): """ Returns WCS coordinate values of all pixels for desired axes. Parameters ---------- axes: `int` or `str`, or multiple `int` or `str`, optional Axis number in numpy ordering or unique substring of `~ndcube.NDCube.wcs.world_axis_physical_types` of axes for which real world coordinates are desired. axes=None implies all axes will be returned. pixel_corners: `bool`, optional If `True` then instead of returning the coordinates of the pixel centers the coordinates of the pixel corners will be returned. This increases the size of the output along each dimension by 1 as all corners are returned. wcs: `astropy.wcs.wcsapi.BaseHighLevelWCS`, optional The WCS object to use to calculate the world coordinates. Although technically this can be any valid WCS, it will typically be ``self.wcs``, ``self.extra_coords``, or ``self.combined_wcs``, combining both the WCS and extra coords. Defaults to the ``.wcs`` property. Returns ------- axes_coords: `list` An iterable of "high level" objects giving the real world coords for the axes requested by user. For example, a tuple of `~astropy.coordinates.SkyCoord` objects. The types returned are determined by the WCS object. The dimensionality of these objects should match that of their corresponding array dimensions, unless ``pixel_corners=True`` in which case the length along each axis will be 1 greater than the number of pixels. Example ------- >>> NDCube.axis_world_coords_values(('lat', 'lon')) # doctest: +SKIP >>> NDCube.axis_world_coords_values(2) # doctest: +SKIP """ if isinstance(wcs, BaseHighLevelWCS): wcs = wcs.low_level_wcs axes_coords = self._generate_world_coords(pixel_corners, wcs) if isinstance(wcs, ExtraCoords): wcs = wcs.wcs world_axis_physical_types = wcs.world_axis_physical_types # If user has supplied axes, extract only the # world coords that correspond to those axes. if axes: world_indices = utils.wcs.calculate_world_indices_from_axes(wcs, axes) axes_coords = [axes_coords[i] for i in world_indices] world_axis_physical_types = tuple(np.array(world_axis_physical_types)[world_indices]) # Return in array order. # First replace characters in physical types forbidden for namedtuple identifiers. 
identifiers = [] for physical_type in world_axis_physical_types[::-1]: identifier = physical_type.replace(":", "_") identifier = identifier.replace(".", "_") identifier = identifier.replace("-", "__") identifiers.append(identifier) CoordValues = namedtuple("CoordValues", identifiers) return CoordValues(*axes_coords[::-1]) def crop(self, *points, wcs=None): # The docstring is defined in NDCubeABC # Calculate the array slice item corresponding to bounding box and return sliced cube. item = self._get_crop_item(*points, wcs=wcs) return self[item] @utils.cube.sanitize_wcs def _get_crop_item(self, *points, wcs=None): # Sanitize inputs. no_op, points, wcs = utils.cube.sanitize_crop_inputs(points, wcs) # Quit out early if we are no-op if no_op: return tuple([slice(None)] * wcs.pixel_n_dim) else: comp = [c[0] for c in wcs.world_axis_object_components] # Trim to unique component names - `np.unique(..., return_index=True) # keeps sorting alphabetically, set() seems just nondeterministic. for k, c in enumerate(comp): if comp.count(c) > 1: comp.pop(k) classes = [wcs.world_axis_object_classes[c][0] for c in comp] for i, point in enumerate(points): if len(point) != len(comp): raise ValueError(f"{len(point)} components in point {i} do not match " f"WCS with {len(comp)} components.") for j, value in enumerate(point): if not (value is None or isinstance(value, classes[j])): raise TypeError(f"{type(value)} of component {j} in point {i} is " f"incompatible with WCS component {comp[j]} " f"{type(classes[j])}.") return utils.cube.get_crop_item_from_points(points, wcs, False) def crop_by_values(self, *points, units=None, wcs=None): # The docstring is defined in NDCubeABC # Calculate the array slice item corresponding to bounding box and return sliced cube. item = self._get_crop_by_values_item(*points, units=units, wcs=wcs) return self[item] @utils.cube.sanitize_wcs def _get_crop_by_values_item(self, *points, units=None, wcs=None): # Sanitize inputs. no_op, points, wcs = utils.cube.sanitize_crop_inputs(points, wcs) # Quit out early if we are no-op if no_op: return tuple([slice(None)] * wcs.pixel_n_dim) # Convert float inputs to quantities using units. n_coords = len(points[0]) if units is None: units = [None] * n_coords elif len(units) != n_coords: raise ValueError(f"Units must be None or have same length {n_coords} as corner inputs.") types_with_units = (u.Quantity, type(None)) for i, point in enumerate(points): if len(point) != wcs.world_n_dim: raise ValueError(f"{len(point)} dimensions in point {i} do not match " f"WCS with {wcs.world_n_dim} world dimensions.") for j, (value, unit) in enumerate(zip(point, units)): value_is_float = not isinstance(value, types_with_units) if value_is_float: if unit is None: raise TypeError( "If an element of a point is not a Quantity or None, " "the corresponding unit must be a valid astropy Unit or unit string." 
f"index: {i}; coord type: {type(value)}; unit: {unit}") points[i][j] = u.Quantity(value, unit=unit) if value is not None: try: points[i][j] = points[i][j].to(wcs.world_axis_units[j]) except UnitsError as err: raise UnitsError(f"Unit '{points[i][j].unit}' of coordinate object {j} in point {i} is " f"incompatible with WCS unit '{wcs.world_axis_units[j]}'") from err return utils.cube.get_crop_item_from_points(points, wcs, True) def __str__(self): return textwrap.dedent(f"""\ NDCube ------ Dimensions: {self.dimensions} Physical Types of Axes: {self.array_axis_physical_types} Unit: {self.unit} Data Type: {self.data.dtype}""") def __repr__(self): return f"{object.__repr__(self)}\n{str(self)}" def explode_along_axis(self, axis): """ Separates slices of NDCubes along a given axis into an NDCubeSequence of (N-1)DCubes. Parameters ---------- axis : `int` The array axis along which the data is to be changed. Returns ------- result : `ndcube.NDCubeSequence` """ # If axis is -ve then calculate the axis from the length of the dimensions of one cube if axis < 0: axis = len(self.dimensions) + axis # To store the resultant cube result_cubes = [] # All slices are initially initialised as slice(None, None, None) cube_slices = [slice(None, None, None)] * self.data.ndim # Slicing the cube inside result_cube for i in range(self.data.shape[axis]): # Setting the slice value to the index so that the slices are done correctly. cube_slices[axis] = i # Set to None the metadata of sliced cubes. item = tuple(cube_slices) sliced_cube = self[item] sliced_cube.meta = None # Appending the sliced cubes in the result_cube list result_cubes.append(sliced_cube) # Creating a new NDCubeSequence with the result_cubes and common axis as axis return NDCubeSequence(result_cubes, meta=self.meta) def _validate_algorithm_and_order(self, algorithm, order): order_compatibility = { 'interpolation': ['nearest-neighbor', 'bilinear', 'biquadratic', 'bicubic'], 'adaptive': ['nearest-neighbor', 'bilinear'], 'exact': [] } if algorithm in order_compatibility: if order_compatibility[algorithm] and order not in order_compatibility[algorithm]: raise ValueError(f"For '{algorithm}' algorithm, the 'order' argument must be " f"one of {", ".join(order_compatibility[algorithm])}.") else: raise ValueError(f"The 'algorithm' argument must be one of " f"{", ".join(order_compatibility.keys())}.") def reproject_to(self, target_wcs, algorithm='interpolation', shape_out=None, order='bilinear', output_array=None, parallel=False, return_footprint=False): """ Reprojects this NDCube to the coordinates described by another WCS object. Parameters ---------- algorithm: `str` The algorithm to use for reprojecting. This can be any of: 'interpolation', 'adaptive', and 'exact'. target_wcs : `astropy.wcs.wcsapi.BaseHighLevelWCS`, `astropy.wcs.wcsapi.BaseLowLevelWCS`, or `astropy.io.fits.Header` The WCS object to which the ``NDCube`` is to be reprojected. shape_out: `tuple`, optional The shape of the output data array. The ordering of the dimensions must follow NumPy ordering and not the WCS pixel shape. If not specified, `~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` attribute (if available) from the low level API of the ``target_wcs`` is used. order: `int` or `str` The order of the interpolation (used only when the 'interpolation' or 'adaptive' algorithm is selected). For 'interpolation' algorithm, this can be any of: 'nearest-neighbor', 'bilinear', 'biquadratic', and 'bicubic'. For 'adaptive' algorithm, this can be either 'nearest-neighbor' or 'bilinear'. 
        output_array: `numpy.ndarray`, optional
            An array in which to store the reprojected data. This can be any
            numpy array including a memory map, which may be helpful when
            dealing with extremely large files.

        parallel: `bool` or `int`
            Flag for parallel implementation (used only when the 'exact' algorithm is selected).
            If ``True``, a parallel implementation is chosen and the number of processes is selected
            automatically as the number of logical CPUs detected on the machine. If ``False``,
            a serial implementation is chosen. If the flag is a positive integer n greater than one,
            a parallel implementation using n processes is chosen.

        return_footprint: `bool`
            Whether to return the footprint in addition to the output NDCube.

        Returns
        -------
        resampled_cube : `ndcube.NDCube`
            A new resultant NDCube object; the supplied ``target_wcs`` will be
            the ``.wcs`` attribute of the output ``NDCube``.

        footprint: `numpy.ndarray`
            Footprint of the input array in the output array.
            Values of 0 indicate no coverage or valid values in the input
            image, while values of 1 indicate valid values.

        Notes
        -----
        This method doesn't support handling of the ``mask``, ``extra_coords``, and ``uncertainty`` attributes yet.
        However, ``meta`` and ``global_coords`` are copied to the output ``NDCube``.
        """
        try:
            from reproject import reproject_adaptive, reproject_exact, reproject_interp
            from reproject.wcs_utils import has_celestial
        except ModuleNotFoundError:
            raise ImportError("The NDCube.reproject_to method requires the optional package `reproject`.")

        if isinstance(target_wcs, Mapping):
            target_wcs = WCS(header=target_wcs)

        low_level_target_wcs = utils.wcs.get_low_level_wcs(target_wcs, 'target_wcs')

        # 'adaptive' and 'exact' algorithms work only on 2D celestial WCS.
        if algorithm == 'adaptive' or algorithm == 'exact':
            if low_level_target_wcs.pixel_n_dim != 2 or low_level_target_wcs.world_n_dim != 2:
                raise ValueError('For adaptive and exact algorithms, target_wcs must be 2D.')

            if not has_celestial(target_wcs):
                raise ValueError('For adaptive and exact algorithms, '
                                 'target_wcs must contain celestial axes only.')

        if not utils.wcs.compare_wcs_physical_types(self.wcs, target_wcs):
            raise ValueError('Given target_wcs is not compatible with this NDCube, the physical types do not match.')

        # If shape_out is not specified explicitly, try to extract it from the low level WCS
        if not shape_out:
            if hasattr(low_level_target_wcs, 'array_shape') and low_level_target_wcs.array_shape is not None:
                shape_out = low_level_target_wcs.array_shape
            else:
                raise ValueError("shape_out must be specified if target_wcs does not have the array_shape attribute.")

        self._validate_algorithm_and_order(algorithm, order)

        if algorithm == 'interpolation':
            data = reproject_interp(self, output_projection=target_wcs,
                                    shape_out=shape_out, order=order,
                                    output_array=output_array,
                                    return_footprint=return_footprint)
        elif algorithm == 'adaptive':
            data = reproject_adaptive(self, output_projection=target_wcs,
                                      shape_out=shape_out, order=order,
                                      return_footprint=return_footprint)
        elif algorithm == 'exact':
            data = reproject_exact(self, output_projection=target_wcs,
                                   shape_out=shape_out, parallel=parallel,
                                   return_footprint=return_footprint)

        if return_footprint:
            data, footprint = data

        resampled_cube = type(self)(data, wcs=target_wcs, meta=deepcopy(self.meta))
        resampled_cube._global_coords = deepcopy(self.global_coords)

        if return_footprint:
            return resampled_cube, footprint

        return resampled_cube


class NDCube(NDCubeBase):
    """
    Class representing N-D data described by a single array and set of WCS
    transformations.

    Parameters
    ----------
    data: `numpy.ndarray`
        The array holding the actual data in this object.

    wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS`, `astropy.wcs.wcsapi.BaseHighLevelWCS`, optional
        The WCS object containing the axes' information, optional only if
        ``data`` is an `astropy.nddata.NDData` object.

    uncertainty : any type, optional
        Uncertainty in the dataset. Should have an attribute uncertainty_type
        that defines what kind of uncertainty is stored, for example "std"
        for standard deviation or "var" for variance. A metaclass defining
        such an interface is NDUncertainty - but isn’t mandatory. If the
        uncertainty has no such attribute the uncertainty is stored as
        UnknownUncertainty.
        Defaults to None.

    mask : any type, optional
        Mask for the dataset. Masks should follow the numpy convention
        that valid data points are marked by False and invalid ones with True.
        Defaults to None.

    meta : dict-like object, optional
        Additional meta information about the dataset. If no meta is provided
        an empty collections.OrderedDict is created.
        Default is None.

    unit : Unit-like or str, optional
        Unit for the dataset. Strings that can be converted to a Unit are allowed.
        Default is None.

    extra_coords : iterable of `tuple`, each with three entries
        (`str`, `int`, `astropy.units.quantity` or array-like)
        Gives the name, axis of data, and values of coordinates of a data axis
        not included in the WCS object.

    copy : bool, optional
        Indicates whether to save the arguments as copy. True copies every
        attribute before saving it while False tries to save every parameter
        as reference. Note however that it is not always possible to save the
        input as reference.
        Default is False.
    """
    # We special case the default mpl plotter here so that we can only import
    # matplotlib when `.plotter` is accessed and raise an ImportError at the
    # last moment.
    plotter = PlotterDescriptor(default_type="mpl_plotter")
    """
    A `~.MatplotlibPlotter` instance providing visualization methods.

    The type of this attribute can be changed to provide custom visualization functionality.
    """

    def _as_mpl_axes(self):
        if hasattr(self.plotter, "_as_mpl_axes"):
            return self.plotter._as_mpl_axes()
        else:
            warnings.warn(f"The current plotter {self.plotter} does not have a '_as_mpl_axes' method. "
                          "The default MatplotlibPlotter._as_mpl_axes method will be used instead.",
                          UserWarning)

            # Imported locally so the fallback works without requiring
            # matplotlib at module import time.
            from ndcube.visualization.mpl_plotter import MatplotlibPlotter

            plotter = MatplotlibPlotter(self)
            return plotter._as_mpl_axes()

    def plot(self, *args, **kwargs):
        """
        A convenience function for the plotter's default ``plot()`` method.

        Calling this method is the same as calling ``cube.plotter.plot``; the
        behaviour of this method can change if the `NDCube.plotter` class is
        set to a different ``Plotter`` class.
        """
        if self.plotter is None:
            raise NotImplementedError(
                "This NDCube object does not have a .plotter defined so "
                "no default plotting functionality is available.")

        return self.plotter.plot(*args, **kwargs)
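# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the ndcube source
# above): a minimal, self-contained example of the coordinate APIs defined
# above. It assumes `ndcube` and `astropy` are installed; the header values
# and crop bounds below are illustrative only.
import numpy as _np
import astropy.units as _u
from astropy.coordinates import SkyCoord as _SkyCoord
from astropy.wcs import WCS as _ExampleWCS

from ndcube import NDCube as _ExampleNDCube

# A simple 2D celestial WCS built from a FITS-style header dict.
_hdr = {"CTYPE1": "RA---TAN", "CUNIT1": "deg", "CDELT1": 0.5,
        "CRPIX1": 2, "CRVAL1": 10, "NAXIS1": 4,
        "CTYPE2": "DEC--TAN", "CUNIT2": "deg", "CDELT2": 0.5,
        "CRPIX2": 2, "CRVAL2": 20, "NAXIS2": 4}
_cube = _ExampleNDCube(_np.arange(16, dtype=float).reshape(4, 4),
                       wcs=_ExampleWCS(_hdr))

# axis_world_coords returns high-level objects (here a single SkyCoord whose
# shape matches the array); axis_world_coords_values returns bare Quantities
# in a namedtuple keyed by sanitized physical-type names.
(_sky,) = _cube.axis_world_coords()
_values = _cube.axis_world_coords_values()

# crop takes one high-level object per unique WCS component per point, in
# world (WCS) order, and returns the smallest enclosing sub-cube.
_small = _cube.crop([_SkyCoord(9.5 * _u.deg, 19.5 * _u.deg, frame="icrs")],
                    [_SkyCoord(10.5 * _u.deg, 20.5 * _u.deg, frame="icrs")])
# ---------------------------------------------------------------------------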
import abc import textwrap import warnings from copy import deepcopy from collections import namedtuple from collections.abc import Mapping import astropy.nddata import astropy.units as u import numpy as np from astropy.units import UnitsError try: # Import sunpy coordinates if available to register the frames and WCS functions with astropy import sunpy.coordinates # pylint: disable=unused-import # NOQA except ImportError: pass from astropy.wcs import WCS from astropy.wcs.utils import _split_matrix from astropy.wcs.wcsapi import BaseHighLevelWCS, HighLevelWCSWrapper from ndcube import utils from ndcube.extra_coords import ExtraCoords from ndcube.global_coords import GlobalCoords from ndcube.mixins import NDCubeSlicingMixin from ndcube.ndcube_sequence import NDCubeSequence from ndcube.utils.wcs_high_level_conversion import values_to_high_level_objects from ndcube.visualization import PlotterDescriptor from ndcube.wcs.wrappers import CompoundLowLevelWCS __all__ = ['NDCubeABC', 'NDCubeBase', 'NDCube'] class NDCubeABC(astropy.nddata.NDData, metaclass=abc.ABCMeta): @abc.abstractproperty def dimensions(self): """ The array dimensions of the cube. """ @abc.abstractmethod def crop(self, *points, wcs=None): """ Crop to the smallest cube in pixel space containing the world coordinate points. Parameters ---------- points: iterable of iterables Tuples of high level coordinate objects e.g. `~astropy.coordinates.SkyCoord`. The coordinates of the points **must be specified in Cartesian (WCS) order** as they are passed to `~astropy.wcs.wcsapi.BaseHighLevelWCS.world_to_array_index`. Therefore their number and order must be compatible with the API of that method. It is possible to not specify a coordinate for an axis by replacing any object with `None`. Any coordinate replaced by `None` will not be used to calculate pixel coordinates, and therefore not affect the calculation of the final bounding box. wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS` The WCS to use to calculate the pixel coordinates based on the input. Will default to the ``.wcs`` property if not given. While any valid WCS could be used it is expected that either the ``.wcs``, ``.combined_wcs``, or ``.extra_coords`` properties will be used. Returns ------- result: `ndcube.NDCube` """ @abc.abstractmethod def crop_by_values(self, *points, units=None, wcs=None): """ Crop to the smallest cube in pixel space containing the world coordinate points. Parameters ---------- points: iterable of iterables Tuples of coordinates as `~astropy.units.Quantity` objects. The coordinates of the points **must be specified in Cartesian (WCS) order** as they are passed to `~astropy.wcs.wcsapi.BaseHighLevelWCS.world_to_array_index_values`. Therefore their number and order must be compatible with the API of that method. It is possible to not specify a coordinate for an axis by replacing any coordinate with `None`. Any coordinate replaced by `None` will not be used to calculate pixel coordinates, and therefore not affect the calculation of the final bounding box. Note that you must specify either none or all coordinates for any correlated axes, e.g. both spatial coordinates. units: iterable of `astropy.units.Unit` The unit of the corresponding entries in each point. Must therefore be the same length as the number of world axes. Only used if the corresponding type is not a `astropy.units.Quantity` or `None`. wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS` The WCS to use to calculate the pixel coordinates based on the input. Will default to the ``.wcs`` property if not given. 
            While any valid WCS could be used it is expected that either the
            ``.wcs``, ``.combined_wcs``, or ``.extra_coords`` properties will be used.

        Returns
        -------
        result: `ndcube.NDCube`
        """


class NDCubeLinkedDescriptor:
    """
    A descriptor which gives the property a reference to the cube to which it
    is attached.
    """

    def __init__(self, default_type):
        self._default_type = default_type
        self._property_name = None

    def __set_name__(self, owner, name):
        """
        This function is called when the class the descriptor is attached to
        is initialized. The *class* and not the instance.
        """
        # property name is the name of the attribute on the parent class
        # pointing at an instance of this descriptor.
        self._property_name = name
        # attribute name is the name of the attribute on the parent class where
        # the data is stored.
        self._attribute_name = f"_{name}"

    def __get__(self, obj, objtype=None):
        if obj is None:
            return

        if getattr(obj, self._attribute_name, None) is None and self._default_type is not None:
            self.__set__(obj, self._default_type)

        return getattr(obj, self._attribute_name)

    def __set__(self, obj, value):
        if isinstance(value, self._default_type):
            value._ndcube = obj
        elif issubclass(value, self._default_type):
            value = value(obj)
        else:
            raise ValueError(
                f"Unable to set value for {self._property_name}; it should "
                f"be an instance or subclass of {self._default_type}")

        setattr(obj, self._attribute_name, value)


class NDCubeBase(NDCubeSlicingMixin, NDCubeABC):
    """
    Class representing N-D data described by a single array and set of WCS
    transformations.

    Parameters
    ----------
    data: array-like or `astropy.nddata.NDData`
        The array holding the actual data in this object.

    wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS`, `astropy.wcs.wcsapi.BaseHighLevelWCS`, optional
        The WCS object containing the axes' information, optional only if
        ``data`` is an `astropy.nddata.NDData` object.

    uncertainty : any type, optional
        Uncertainty in the dataset. Should have an attribute uncertainty_type
        that defines what kind of uncertainty is stored, for example "std"
        for standard deviation or "var" for variance. A metaclass defining
        such an interface is `~astropy.nddata.NDUncertainty` - but isn’t mandatory.
        If the uncertainty has no such attribute the uncertainty is stored as
        `~astropy.nddata.UnknownUncertainty`.
        Defaults to None.

    mask : any type, optional
        Mask for the dataset. Masks should follow the numpy convention that
        valid data points are marked by `False` and invalid ones with `True`.
        Defaults to `None`.

    meta : dict-like object, optional
        Additional meta information about the dataset.
        If no meta is provided an empty dictionary is created.

    unit : Unit-like or `str`, optional
        Unit for the dataset. Strings that can be converted to a
        `~astropy.units.Unit` are allowed.
        Default is `None` which results in dimensionless units.

    copy : bool, optional
        Indicates whether to save the arguments as copy. `True` copies every
        attribute before saving it while `False` tries to save every parameter
        as reference. Note however that it is not always possible to save the
        input as reference.
        Default is `False`.
    """
    # Instances of Extra and Global coords are managed through descriptors
    _extra_coords = NDCubeLinkedDescriptor(ExtraCoords)
    _global_coords = NDCubeLinkedDescriptor(GlobalCoords)

    def __init__(self, data, wcs=None, uncertainty=None, mask=None, meta=None,
                 unit=None, copy=False, **kwargs):

        super().__init__(data, uncertainty=uncertainty, mask=mask,
                         meta=meta, unit=unit, copy=copy, **kwargs)
        if not self.wcs:
            self.wcs = wcs
        # This line is required as a patch for an astropy bug.
        # Above line is in if statement to prevent WCS being overwritten with None
        # if we are instantiating from an NDCube.

        # Enforce that the WCS object is not None
        if self.wcs is None:
            raise TypeError("The WCS argument can not be None.")

        # Get existing extra_coords if initializing from an NDCube
        if hasattr(data, "extra_coords"):
            extra_coords = data.extra_coords
            if copy:
                extra_coords = deepcopy(extra_coords)
            self._extra_coords = extra_coords

        # Get existing global_coords if initializing from an NDCube
        if hasattr(data, "global_coords"):
            global_coords = data._global_coords
            if copy:
                global_coords = deepcopy(global_coords)
            self._global_coords = global_coords

    @property
    def extra_coords(self):
        """
        An `.ExtraCoords` object holding extra coordinates aligned to array axes.
        """
        return self._extra_coords

    @property
    def global_coords(self):
        """
        A `.GlobalCoords` object holding coordinate metadata not aligned to an array axis.
        """
        return self._global_coords

    @property
    def combined_wcs(self):
        """
        A `~astropy.wcs.wcsapi.BaseHighLevelWCS` object which combines ``.wcs`` with ``.extra_coords``.
        """
        if not self.extra_coords.wcs:
            return self.wcs

        mapping = list(range(self.wcs.pixel_n_dim)) + list(self.extra_coords.mapping)
        return HighLevelWCSWrapper(
            CompoundLowLevelWCS(self.wcs.low_level_wcs, self._extra_coords.wcs, mapping=mapping)
        )

    @property
    def dimensions(self):
        return u.Quantity(self.data.shape, unit=u.pix)

    @property
    def array_axis_physical_types(self):
        """
        Returns the physical types associated with each array axis.

        Returns an iterable of tuples where each tuple corresponds to an array axis and
        holds strings denoting the physical types associated with that array axis.
        Since multiple physical types can be associated with one array axis, tuples can
        be of different lengths. Likewise, as a single physical type can correspond to
        multiple array axes, the same physical type string can appear in multiple tuples.

        The physical types are drawn from the WCS and ExtraCoords objects.
        """
        wcs = self.combined_wcs
        world_axis_physical_types = np.array(wcs.world_axis_physical_types)
        axis_correlation_matrix = wcs.axis_correlation_matrix
        return [tuple(world_axis_physical_types[axis_correlation_matrix[:, i]])
                for i in range(axis_correlation_matrix.shape[1])][::-1]

    def _generate_world_coords(self, pixel_corners, wcs):
        # TODO: We can improve this by not always generating all coordinates
        # To make our lives easier here we generate all the coordinates for all
        # pixels and then choose the ones we want to return to the user based
        # on the axes argument. We could be smarter by integrating this logic
        # into the main loop, this would potentially reduce the number of calls
        # to pixel_to_world_values

        # Create meshgrid of all pixel coordinates.
        # If user wants pixel_corners, set pixel values to pixel corners.
        # Else make pixel centers.
        pixel_shape = self.data.shape[::-1]
        if pixel_corners:
            pixel_shape = tuple(np.array(pixel_shape) + 1)
            ranges = [np.arange(i) - 0.5 for i in pixel_shape]
        else:
            ranges = [np.arange(i) for i in pixel_shape]

        # Limit the pixel dimensions to the ones present in the ExtraCoords
        if isinstance(wcs, ExtraCoords):
            ranges = [ranges[i] for i in wcs.mapping]
            wcs = wcs.wcs
        if wcs is None:
            return []

        world_coords = [None] * wcs.world_n_dim
        for (pixel_axes_indices, world_axes_indices) in _split_matrix(wcs.axis_correlation_matrix):
            # First construct a range of pixel indices for this set of coupled dimensions
            sub_range = [ranges[idx] for idx in pixel_axes_indices]
            # Then get a set of non-correlated dimensions
            non_corr_axes = set(list(range(wcs.pixel_n_dim))) - set(pixel_axes_indices)
            # And inject 0s for those coordinates
            for idx in non_corr_axes:
                sub_range.insert(idx, 0)
            # Generate a grid of broadcastable pixel indices for all pixel dimensions
            grid = np.meshgrid(*sub_range, indexing='ij')
            # Convert to world coordinates
            world = wcs.pixel_to_world_values(*grid)
            # TODO: this isinstance check is to mitigate https://github.com/spacetelescope/gwcs/pull/332
            if wcs.world_n_dim == 1 and not isinstance(world, tuple):
                world = [world]
            # Extract the world coordinates of interest and remove any non-correlated axes
            # Transpose the world coordinates so they match array ordering not pixel
            for idx in world_axes_indices:
                array_slice = np.zeros((wcs.pixel_n_dim,), dtype=object)
                array_slice[wcs.axis_correlation_matrix[idx]] = slice(None)
                tmp_world = world[idx][tuple(array_slice)].T
                world_coords[idx] = tmp_world

        for i, (coord, unit) in enumerate(zip(world_coords, wcs.world_axis_units)):
            world_coords[i] = coord << u.Unit(unit)

        return world_coords

    @utils.cube.sanitize_wcs
    def axis_world_coords(self, *axes, pixel_corners=False, wcs=None):
        """
        Returns WCS coordinate values of all pixels for all axes.

        Parameters
        ----------
        axes: `int` or `str`, or multiple `int` or `str`, optional
            Axis number in numpy ordering or unique substring of
            `~ndcube.NDCube.world_axis_physical_types` of axes for which
            real world coordinates are desired.
            axes=None implies all axes will be returned.

        pixel_corners: `bool`, optional
            If `True` then instead of returning the coordinates at the centers
            of the pixels, the coordinates at the pixel corners will be
            returned. This increases the size of the output by 1 in all
            dimensions as all corners are returned.

        wcs: `astropy.wcs.wcsapi.BaseHighLevelWCS`, optional
            The WCS object to use to calculate the world coordinates.
            Although technically this can be any valid WCS, it will typically be
            ``self.wcs``, ``self.extra_coords``, or ``self.combined_wcs`` which
            combines both the WCS and extra coords.
            Defaults to the ``.wcs`` property.

        Returns
        -------
        axes_coords: `list`
            An iterable of "high level" objects giving the real world
            coords for the axes requested by user.
            For example, a tuple of `~astropy.coordinates.SkyCoord` objects.
            The types returned are determined by the WCS object.
            The dimensionality of these objects should match that of
            their corresponding array dimensions, unless ``pixel_corners=True``
            in which case the length along each axis will be 1 greater than the number of pixels.
        Example
        -------
        >>> NDCube.all_world_coords(('lat', 'lon')) # doctest: +SKIP
        >>> NDCube.all_world_coords(2) # doctest: +SKIP
        """
        if isinstance(wcs, BaseHighLevelWCS):
            wcs = wcs.low_level_wcs

        axes_coords = self._generate_world_coords(pixel_corners, wcs)

        if isinstance(wcs, ExtraCoords):
            wcs = wcs.wcs
            if not wcs:
                return tuple()

        axes_coords = values_to_high_level_objects(*axes_coords, low_level_wcs=wcs)

        if not axes:
            return tuple(axes_coords)

        object_names = np.array([wao_comp[0] for wao_comp in wcs.world_axis_object_components])
        unique_obj_names = utils.misc.unique_sorted(object_names)
        world_axes_for_obj = [np.where(object_names == name)[0] for name in unique_obj_names]

        # Create a mapping from world index in the WCS to object index in axes_coords
        world_index_to_object_index = {}
        for object_index, world_axes in enumerate(world_axes_for_obj):
            for world_index in world_axes:
                world_index_to_object_index[world_index] = object_index

        world_indices = utils.wcs.calculate_world_indices_from_axes(wcs, axes)
        object_indices = utils.misc.unique_sorted(
            [world_index_to_object_index[world_index] for world_index in world_indices]
        )

        return tuple(axes_coords[i] for i in object_indices)

    @utils.cube.sanitize_wcs
    def axis_world_coords_values(self, *axes, pixel_corners=False, wcs=None):
        """
        Returns WCS coordinate values of all pixels for desired axes.

        Parameters
        ----------
        axes: `int` or `str`, or multiple `int` or `str`, optional
            Axis number in numpy ordering or unique substring of
            `~ndcube.NDCube.wcs.world_axis_physical_types` of axes for which
            real world coordinates are desired.
            axes=None implies all axes will be returned.

        pixel_corners: `bool`, optional
            If `True` then instead of returning the coordinates of the pixel
            centers the coordinates of the pixel corners will be returned.
            This increases the size of the output along each dimension by 1
            as all corners are returned.

        wcs: `astropy.wcs.wcsapi.BaseHighLevelWCS`, optional
            The WCS object to use to calculate the world coordinates.
            Although technically this can be any valid WCS, it will typically be
            ``self.wcs``, ``self.extra_coords``, or ``self.combined_wcs``, combining
            both the WCS and extra coords.
            Defaults to the ``.wcs`` property.

        Returns
        -------
        axes_coords: `list`
            An iterable of "high level" objects giving the real world
            coords for the axes requested by user.
            For example, a tuple of `~astropy.coordinates.SkyCoord` objects.
            The types returned are determined by the WCS object.
            The dimensionality of these objects should match that of
            their corresponding array dimensions, unless ``pixel_corners=True``
            in which case the length along each axis will be 1 greater than the number of pixels.

        Example
        -------
        >>> NDCube.all_world_coords_values(('lat', 'lon')) # doctest: +SKIP
        >>> NDCube.all_world_coords_values(2) # doctest: +SKIP
        """
        if isinstance(wcs, BaseHighLevelWCS):
            wcs = wcs.low_level_wcs

        axes_coords = self._generate_world_coords(pixel_corners, wcs)

        if isinstance(wcs, ExtraCoords):
            wcs = wcs.wcs

        world_axis_physical_types = wcs.world_axis_physical_types

        # If user has supplied axes, extract only the
        # world coords that correspond to those axes.
        if axes:
            world_indices = utils.wcs.calculate_world_indices_from_axes(wcs, axes)
            axes_coords = [axes_coords[i] for i in world_indices]
            world_axis_physical_types = tuple(np.array(world_axis_physical_types)[world_indices])

        # Return in array order.
        # First replace characters in physical types forbidden for namedtuple identifiers.
identifiers = [] for physical_type in world_axis_physical_types[::-1]: identifier = physical_type.replace(":", "_") identifier = identifier.replace(".", "_") identifier = identifier.replace("-", "__") identifiers.append(identifier) CoordValues = namedtuple("CoordValues", identifiers) return CoordValues(*axes_coords[::-1]) def crop(self, *points, wcs=None): # The docstring is defined in NDCubeABC # Calculate the array slice item corresponding to bounding box and return sliced cube. item = self._get_crop_item(*points, wcs=wcs) return self[item] @utils.cube.sanitize_wcs def _get_crop_item(self, *points, wcs=None): # Sanitize inputs. no_op, points, wcs = utils.cube.sanitize_crop_inputs(points, wcs) # Quit out early if we are no-op if no_op: return tuple([slice(None)] * wcs.pixel_n_dim) else: comp = [c[0] for c in wcs.world_axis_object_components] # Trim to unique component names - `np.unique(..., return_index=True) # keeps sorting alphabetically, set() seems just nondeterministic. for k, c in enumerate(comp): if comp.count(c) > 1: comp.pop(k) classes = [wcs.world_axis_object_classes[c][0] for c in comp] for i, point in enumerate(points): if len(point) != len(comp): raise ValueError(f"{len(point)} components in point {i} do not match " f"WCS with {len(comp)} components.") for j, value in enumerate(point): if not (value is None or isinstance(value, classes[j])): raise TypeError(f"{type(value)} of component {j} in point {i} is " f"incompatible with WCS component {comp[j]} " f"{type(classes[j])}.") return utils.cube.get_crop_item_from_points(points, wcs, False) def crop_by_values(self, *points, units=None, wcs=None): # The docstring is defined in NDCubeABC # Calculate the array slice item corresponding to bounding box and return sliced cube. item = self._get_crop_by_values_item(*points, units=units, wcs=wcs) return self[item] @utils.cube.sanitize_wcs def _get_crop_by_values_item(self, *points, units=None, wcs=None): # Sanitize inputs. no_op, points, wcs = utils.cube.sanitize_crop_inputs(points, wcs) # Quit out early if we are no-op if no_op: return tuple([slice(None)] * wcs.pixel_n_dim) # Convert float inputs to quantities using units. n_coords = len(points[0]) if units is None: units = [None] * n_coords elif len(units) != n_coords: raise ValueError(f"Units must be None or have same length {n_coords} as corner inputs.") types_with_units = (u.Quantity, type(None)) for i, point in enumerate(points): if len(point) != wcs.world_n_dim: raise ValueError(f"{len(point)} dimensions in point {i} do not match " f"WCS with {wcs.world_n_dim} world dimensions.") for j, (value, unit) in enumerate(zip(point, units)): value_is_float = not isinstance(value, types_with_units) if value_is_float: if unit is None: raise TypeError( "If an element of a point is not a Quantity or None, " "the corresponding unit must be a valid astropy Unit or unit string." 
f"index: {i}; coord type: {type(value)}; unit: {unit}") points[i][j] = u.Quantity(value, unit=unit) if value is not None: try: points[i][j] = points[i][j].to(wcs.world_axis_units[j]) except UnitsError as err: raise UnitsError(f"Unit '{points[i][j].unit}' of coordinate object {j} in point {i} is " f"incompatible with WCS unit '{wcs.world_axis_units[j]}'") from err return utils.cube.get_crop_item_from_points(points, wcs, True) def __str__(self): return textwrap.dedent(f"""\ NDCube ------ Dimensions: {self.dimensions} Physical Types of Axes: {self.array_axis_physical_types} Unit: {self.unit} Data Type: {self.data.dtype}""") def __repr__(self): return f"{object.__repr__(self)}\n{str(self)}" def explode_along_axis(self, axis): """ Separates slices of NDCubes along a given axis into an NDCubeSequence of (N-1)DCubes. Parameters ---------- axis : `int` The array axis along which the data is to be changed. Returns ------- result : `ndcube.NDCubeSequence` """ # If axis is -ve then calculate the axis from the length of the dimensions of one cube if axis < 0: axis = len(self.dimensions) + axis # To store the resultant cube result_cubes = [] # All slices are initially initialised as slice(None, None, None) cube_slices = [slice(None, None, None)] * self.data.ndim # Slicing the cube inside result_cube for i in range(self.data.shape[axis]): # Setting the slice value to the index so that the slices are done correctly. cube_slices[axis] = i # Set to None the metadata of sliced cubes. item = tuple(cube_slices) sliced_cube = self[item] sliced_cube.meta = None # Appending the sliced cubes in the result_cube list result_cubes.append(sliced_cube) # Creating a new NDCubeSequence with the result_cubes and common axis as axis return NDCubeSequence(result_cubes, meta=self.meta) def _validate_algorithm_and_order(self, algorithm, order): order_compatibility = { 'interpolation': ['nearest-neighbor', 'bilinear', 'biquadratic', 'bicubic'], 'adaptive': ['nearest-neighbor', 'bilinear'], 'exact': [] } if algorithm in order_compatibility: if order_compatibility[algorithm] and order not in order_compatibility[algorithm]: raise ValueError(f"For '{algorithm}' algorithm, the 'order' argument must be " f"one of {', '.join(order_compatibility[algorithm])}.") else: raise ValueError(f"The 'algorithm' argument must be one of " f"{', '.join(order_compatibility.keys())}.") def reproject_to(self, target_wcs, algorithm='interpolation', shape_out=None, order='bilinear', output_array=None, parallel=False, return_footprint=False): """ Reprojects this NDCube to the coordinates described by another WCS object. Parameters ---------- algorithm: `str` The algorithm to use for reprojecting. This can be any of: 'interpolation', 'adaptive', and 'exact'. target_wcs : `astropy.wcs.wcsapi.BaseHighLevelWCS`, `astropy.wcs.wcsapi.BaseLowLevelWCS`, or `astropy.io.fits.Header` The WCS object to which the ``NDCube`` is to be reprojected. shape_out: `tuple`, optional The shape of the output data array. The ordering of the dimensions must follow NumPy ordering and not the WCS pixel shape. If not specified, `~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` attribute (if available) from the low level API of the ``target_wcs`` is used. order: `int` or `str` The order of the interpolation (used only when the 'interpolation' or 'adaptive' algorithm is selected). For 'interpolation' algorithm, this can be any of: 'nearest-neighbor', 'bilinear', 'biquadratic', and 'bicubic'. For 'adaptive' algorithm, this can be either 'nearest-neighbor' or 'bilinear'. 
        output_array: `numpy.ndarray`, optional
            An array in which to store the reprojected data. This can be any
            numpy array including a memory map, which may be helpful when
            dealing with extremely large files.

        parallel: `bool` or `int`
            Flag for parallel implementation (used only when the 'exact' algorithm is selected).
            If ``True``, a parallel implementation is chosen and the number of processes is selected
            automatically as the number of logical CPUs detected on the machine. If ``False``,
            a serial implementation is chosen. If the flag is a positive integer n greater than one,
            a parallel implementation using n processes is chosen.

        return_footprint: `bool`
            Whether to return the footprint in addition to the output NDCube.

        Returns
        -------
        resampled_cube : `ndcube.NDCube`
            A new resultant NDCube object; the supplied ``target_wcs`` will be
            the ``.wcs`` attribute of the output ``NDCube``.

        footprint: `numpy.ndarray`
            Footprint of the input array in the output array.
            Values of 0 indicate no coverage or valid values in the input
            image, while values of 1 indicate valid values.

        Notes
        -----
        This method doesn't support handling of the ``mask``, ``extra_coords``, and ``uncertainty`` attributes yet.
        However, ``meta`` and ``global_coords`` are copied to the output ``NDCube``.
        """
        try:
            from reproject import reproject_adaptive, reproject_exact, reproject_interp
            from reproject.wcs_utils import has_celestial
        except ModuleNotFoundError:
            raise ImportError("The NDCube.reproject_to method requires the optional package `reproject`.")

        if isinstance(target_wcs, Mapping):
            target_wcs = WCS(header=target_wcs)

        low_level_target_wcs = utils.wcs.get_low_level_wcs(target_wcs, 'target_wcs')

        # 'adaptive' and 'exact' algorithms work only on 2D celestial WCS.
        if algorithm == 'adaptive' or algorithm == 'exact':
            if low_level_target_wcs.pixel_n_dim != 2 or low_level_target_wcs.world_n_dim != 2:
                raise ValueError('For adaptive and exact algorithms, target_wcs must be 2D.')

            if not has_celestial(target_wcs):
                raise ValueError('For adaptive and exact algorithms, '
                                 'target_wcs must contain celestial axes only.')

        if not utils.wcs.compare_wcs_physical_types(self.wcs, target_wcs):
            raise ValueError('Given target_wcs is not compatible with this NDCube, the physical types do not match.')

        # If shape_out is not specified explicitly, try to extract it from the low level WCS
        if not shape_out:
            if hasattr(low_level_target_wcs, 'array_shape') and low_level_target_wcs.array_shape is not None:
                shape_out = low_level_target_wcs.array_shape
            else:
                raise ValueError("shape_out must be specified if target_wcs does not have the array_shape attribute.")

        self._validate_algorithm_and_order(algorithm, order)

        if algorithm == 'interpolation':
            data = reproject_interp(self, output_projection=target_wcs,
                                    shape_out=shape_out, order=order,
                                    output_array=output_array,
                                    return_footprint=return_footprint)
        elif algorithm == 'adaptive':
            data = reproject_adaptive(self, output_projection=target_wcs,
                                      shape_out=shape_out, order=order,
                                      return_footprint=return_footprint)
        elif algorithm == 'exact':
            data = reproject_exact(self, output_projection=target_wcs,
                                   shape_out=shape_out, parallel=parallel,
                                   return_footprint=return_footprint)

        if return_footprint:
            data, footprint = data

        resampled_cube = type(self)(data, wcs=target_wcs, meta=deepcopy(self.meta))
        resampled_cube._global_coords = deepcopy(self.global_coords)

        if return_footprint:
            return resampled_cube, footprint

        return resampled_cube


class NDCube(NDCubeBase):
    """
    Class representing N-D data described by a single array and set of WCS
    transformations.

    Parameters
    ----------
    data: `numpy.ndarray`
        The array holding the actual data in this object.

    wcs: `astropy.wcs.wcsapi.BaseLowLevelWCS`, `astropy.wcs.wcsapi.BaseHighLevelWCS`, optional
        The WCS object containing the axes' information, optional only if
        ``data`` is an `astropy.nddata.NDData` object.

    uncertainty : any type, optional
        Uncertainty in the dataset. Should have an attribute uncertainty_type
        that defines what kind of uncertainty is stored, for example "std"
        for standard deviation or "var" for variance. A metaclass defining
        such an interface is NDUncertainty - but isn’t mandatory. If the
        uncertainty has no such attribute the uncertainty is stored as
        UnknownUncertainty.
        Defaults to None.

    mask : any type, optional
        Mask for the dataset. Masks should follow the numpy convention
        that valid data points are marked by False and invalid ones with True.
        Defaults to None.

    meta : dict-like object, optional
        Additional meta information about the dataset. If no meta is provided
        an empty collections.OrderedDict is created.
        Default is None.

    unit : Unit-like or str, optional
        Unit for the dataset. Strings that can be converted to a Unit are allowed.
        Default is None.

    extra_coords : iterable of `tuple`, each with three entries
        (`str`, `int`, `astropy.units.quantity` or array-like)
        Gives the name, axis of data, and values of coordinates of a data axis
        not included in the WCS object.

    copy : bool, optional
        Indicates whether to save the arguments as copy. True copies every
        attribute before saving it while False tries to save every parameter
        as reference. Note however that it is not always possible to save the
        input as reference.
        Default is False.
    """
    # We special case the default mpl plotter here so that we can only import
    # matplotlib when `.plotter` is accessed and raise an ImportError at the
    # last moment.
    plotter = PlotterDescriptor(default_type="mpl_plotter")
    """
    A `~.MatplotlibPlotter` instance providing visualization methods.

    The type of this attribute can be changed to provide custom visualization functionality.
    """

    def _as_mpl_axes(self):
        if hasattr(self.plotter, "_as_mpl_axes"):
            return self.plotter._as_mpl_axes()
        else:
            warnings.warn(f"The current plotter {self.plotter} does not have a '_as_mpl_axes' method. "
                          "The default MatplotlibPlotter._as_mpl_axes method will be used instead.",
                          UserWarning)

            # Imported locally so the fallback works without requiring
            # matplotlib at module import time.
            from ndcube.visualization.mpl_plotter import MatplotlibPlotter

            plotter = MatplotlibPlotter(self)
            return plotter._as_mpl_axes()

    def plot(self, *args, **kwargs):
        """
        A convenience function for the plotter's default ``plot()`` method.

        Calling this method is the same as calling ``cube.plotter.plot``; the
        behaviour of this method can change if the `NDCube.plotter` class is
        set to a different ``Plotter`` class.
        """
        if self.plotter is None:
            raise NotImplementedError(
                "This NDCube object does not have a .plotter defined so "
                "no default plotting functionality is available.")

        return self.plotter.plot(*args, **kwargs)
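# ---------------------------------------------------------------------------
# Hedged sketch (editor's addition, not part of the ndcube source above):
# reprojecting a cube onto a coarser grid with NDCube.reproject_to as defined
# above. Assumes the optional `reproject` package is installed; the headers
# below are illustrative only. shape_out is passed explicitly because a WCS
# built from a bare header dict may not carry an array_shape.
import numpy as _np
from astropy.wcs import WCS as _ExampleWCS

from ndcube import NDCube as _ExampleNDCube

_src = _ExampleWCS({"CTYPE1": "RA---TAN", "CUNIT1": "deg", "CDELT1": 0.25,
                    "CRPIX1": 4, "CRVAL1": 10, "NAXIS1": 8,
                    "CTYPE2": "DEC--TAN", "CUNIT2": "deg", "CDELT2": 0.25,
                    "CRPIX2": 4, "CRVAL2": 20, "NAXIS2": 8})
_fine = _ExampleNDCube(_np.ones((8, 8)), wcs=_src)

# Target WCS with the same physical types (required by reproject_to) but
# double the pixel scale, so the output is a 4x4 resampling of the input.
_target = _ExampleWCS({"CTYPE1": "RA---TAN", "CUNIT1": "deg", "CDELT1": 0.5,
                       "CRPIX1": 2, "CRVAL1": 10,
                       "CTYPE2": "DEC--TAN", "CUNIT2": "deg", "CDELT2": 0.5,
                       "CRPIX2": 2, "CRVAL2": 20})
_coarse = _fine.reproject_to(_target, algorithm='interpolation',
                             order='bilinear', shape_out=(4, 4))
# ---------------------------------------------------------------------------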
from datetime import date import shutil from pathlib import Path from typing import ( AbstractSet, Any, Callable, Iterable, List, Tuple, TypeVar, Union, Optional, Dict, Mapping, ) from adam.language.language_utils import ( phase2_language_generator, integrated_experiment_language_generator, ) from adam.learner.language_mode import LanguageMode from adam.language.language_generator import LanguageGenerator from adam.axis import GeonAxis from adam.curriculum.curriculum_utils import Phase1InstanceGroup from attr import attrib, attrs from attr.validators import instance_of from immutablecollections import ( ImmutableSet, ImmutableSetMultiDict, immutableset, immutablesetmultidict, ) from more_itertools import flatten from networkx import DiGraph from vistautils.parameters import Parameters from vistautils.parameters_only_entrypoint import parameters_only_entry_point from vistautils.preconditions import check_state from adam.curriculum.imprecise_descriptions_curriculum import ( make_imprecise_temporal_descriptions, make_imprecise_size_curriculum, make_subtle_verb_distinctions_curriculum, ) from adam.curriculum.attribute_constraining_action_curriculum import make_german_complete from adam.curriculum.m6_curriculum import make_m6_curriculum from adam.curriculum.phase2_curriculum import ( build_gaila_m8_curriculum, integrated_pursuit_learner_experiment_curriculum, build_gaila_phase_2_curriculum, integrated_pursuit_learner_experiment_test, ) from adam.curriculum.preposition_curriculum import make_prepositions_curriculum from adam.curriculum.pursuit_curriculum import make_pursuit_curriculum from adam.curriculum.phase1_curriculum import ( build_gaila_phase_1_curriculum, build_classifier_curriculum, ) from adam.curriculum import InstanceGroup from adam.curriculum.verbs_with_dynamic_prepositions_curriculum import ( make_verb_with_dynamic_prepositions_curriculum, ) from adam.geon import Geon from adam.axes import WORLD_AXES, AxesInfo, _GravitationalAxis from adam.language import TokenSequenceLinguisticDescription from adam.language.dependency import LinearizedDependencyTree from adam.ontology import IN_REGION, IS_SPEAKER, OntologyNode from adam.ontology.during import DuringAction from adam.ontology.phase1_ontology import ( PART_OF, SMALLER_THAN, BIGGER_THAN, MUCH_SMALLER_THAN, MUCH_BIGGER_THAN, ) from adam.ontology.phase1_spatial_relations import Region, SpatialPath, Direction from adam.perception import ObjectPerception, PerceptualRepresentation from adam.perception.developmental_primitive_perception import ( DevelopmentalPrimitivePerceptionFrame, HasBinaryProperty, HasColor, PropertyPerception, ) from adam.relation import Relation from adam.situation import SituationObject, SituationRegion from adam.situation.high_level_semantics_situation import HighLevelSemanticsSituation from adam.utilities import sign import random USAGE_MESSAGE = """ curriculum_to_html.py param_file \twhere param_file has the following parameter: \t\toutput_directory: where to write the HTML output """ EXPLANATION_HEADER = ( "\n<h2>How to Read Curriculum Dumps</h2>" "\n<p>Each training example for the learner is displayed below as a three-column table. " "In the first column is the 'Situation', which is a description of the salient aspects of the " "situation for use by curriculum designers in debugging. It is not shown to the learner. In " "the second column is the language the learner receives for the situation. 
In the last column"
    " is a representation of the learner's perception of the situation.</p>"
    "<h3>Reading the Language column</h3>"
    "<p>In most cases, the text in this column is exactly what is provided to the learner. "
    "However, if the text is of the form <i>X says 'Y'</i>, then Y is what is given to the "
    "learner, along with an annotation for which person in the perceptual representation said "
    "it.</p>"
    "<h3>Reading the Perception column</h3>"
    "<p>"
    "The perceptual features described in column 3 are presented to the learner in a machine "
    "interpretable fashion (e.g. as attributes, variables, spatial properties). Here, to "
    "assist interpretation of the curriculum we serialize them as debugging text strings.</p>"
    "<p><ul><li>Part of relations (e.g. a head being a part of a person) are represented with the "
    "parts"
    "\n\t appearing as sub-bullets of an object (e.g. head as a sub-bullet under person).</li>"
    "\n\t<li>Numeric indices (e.g. person_0 person_1 leg_0, leg_1) distinguish between objects of the"
    "\n\t same class. </li>"
    "\n\t<li>The IN_REGION relation describes the spatial relation between "
    "an object and region "
    "in the visual field.</li>"
    "\n\t<li>SpatialPath describes the path that an object takes during a situation. The path"
    "\n\t specifies if (and how) an object changes orientation. </li>"
    "\n\t<li>State changes or relations are described with arrows. Relations that are instantiated"
    "\n\t during the scene are preceded by 'Ø --->'. Those that cease to exist during a scene are"
    "\n\t followed by '---> Ø' </li>"
    "\n\t<li>Size relationships are indicated by the '>' symbol. We only display the bigger than size"
    "\n\t relation, but the inverse is also a part of the relationship set. We only represent large differences in size."
    "\n\t '>>' denotes the 'much bigger than' relation.</li>"
    "<li>Many objects also have associated Geons, which describe their shape "
    "according to Biederman's visual perception theory (see deliverable docs for a citation).</li>"
    "<li>The color provided in the background of a phrase reading 'color=#XXXXXX' is the color indicated by the hex code</li>"
    "<li>The Axis Facing section, if included, lists which axes of the objects in the scene face a given object. 
" "In most cases, this information is only provided for the addressee in a scene.</li>" "\n\t Generated On: {date}" "</ul>" ) LANGUAGE_GEN = LanguageGenerator[ # pylint: disable=invalid-name HighLevelSemanticsSituation, LinearizedDependencyTree ] CURRICULUM_BUILDER = Callable[ # pylint: disable=invalid-name [Optional[int], Optional[int], LANGUAGE_GEN], Iterable[Phase1InstanceGroup] ] STR_TO_CURRICULUM: Mapping[str, CURRICULUM_BUILDER] = { "isi-gaila-milestone-8": build_gaila_m8_curriculum, "phase1": build_gaila_phase_1_curriculum, "prepositions": make_prepositions_curriculum, "pursuit": make_pursuit_curriculum, "m6-curriculum": make_m6_curriculum, "verbs-with-dynamic-prepositions": make_verb_with_dynamic_prepositions_curriculum, "essen-fressen-distinction": make_german_complete, "imprecise-temporal": make_imprecise_temporal_descriptions, "imprecise-size": make_imprecise_size_curriculum, "subtle-verb-distinction": make_subtle_verb_distinctions_curriculum, "integrated-experiment": integrated_pursuit_learner_experiment_curriculum, "chinese-classifiers": build_classifier_curriculum, "phase2": build_gaila_phase_2_curriculum, "integrated-experiment-test": integrated_pursuit_learner_experiment_test, } def main(params: Parameters) -> None: root_output_directory = params.creatable_directory("output_directory") curriculum_string = params.string( "curriculum", valid_options=STR_TO_CURRICULUM.keys(), default="phase1" ) language_mode = params.enum( "language_mode", LanguageMode, default=LanguageMode.ENGLISH ) language_string = str(language_mode).rsplit(".", maxsplit=1)[-1].lower() num_samples = params.optional_positive_integer("num_samples") num_noise_objects = params.optional_positive_integer("num_noise_objects") phase1_curriculum_dir = root_output_directory / language_string / curriculum_string phase1_curriculum_dir.mkdir(parents=True, exist_ok=True) # We lazily instantiate the curriculum so we don't need to worry # about any of them we don't actually use. 
    if (
        curriculum_string == "integrated-experiment"
        or curriculum_string == "integrated-experiment-test"
    ):
        curriculum_to_render = STR_TO_CURRICULUM[curriculum_string](
            num_samples,
            num_noise_objects,
            integrated_experiment_language_generator(language_mode),
            params=params.namespace_or_empty("curriculum_params"),
        )
    elif curriculum_string == "phase2":
        curriculum_to_render = STR_TO_CURRICULUM[curriculum_string](
            num_samples,
            num_noise_objects,
            integrated_experiment_language_generator(language_mode),
        )
    else:
        curriculum_to_render = STR_TO_CURRICULUM[curriculum_string](
            num_samples, num_noise_objects, phase2_language_generator(language_mode)
        )

    sort_by_utterance_length_flag = params.boolean("sort_by_utterance", default=False)
    if sort_by_utterance_length_flag:
        random_seed = params.integer("random_seed", default=1)
        CurriculumToHtmlDumper().dump_to_html_as_sorted_by_utterance_length(
            curriculum_to_render,
            output_directory=phase1_curriculum_dir,
            title="GAILA Phase 1 Curriculum Sorted by Utterance Length",
            curriculum_string=curriculum_string,
            random_seed=random_seed,
        )
    else:
        CurriculumToHtmlDumper().dump_to_html(
            curriculum_to_render,
            output_directory=phase1_curriculum_dir,
            title="GAILA Phase 1 Curriculum",
        )


@attrs(frozen=True, slots=True)
class InstanceHolder:
    situation: str = attrib(validator=instance_of(str))
    """
    Holds a rendered situation string
    """
    lingustics: str = attrib(validator=instance_of(str))
    """
    Holds a rendered linguistics string
    """
    perception: str = attrib(validator=instance_of(str))
    """
    Holds a rendered perception string
    """


@attrs(frozen=True, slots=True)
class CurriculumToHtmlDumper:
    """
    Class to turn an `InstanceGroup` into an html document
    """

    def dump_to_html_as_sorted_by_utterance_length(
        self,
        instance_groups: Iterable[
            InstanceGroup[
                HighLevelSemanticsSituation,
                LinearizedDependencyTree,
                DevelopmentalPrimitivePerceptionFrame,
            ]
        ],
        *,
        output_directory: Path,
        title: str,
        curriculum_string: str,
        random_seed: int,
    ):
        all_instances = []
        for (_, instance_group) in enumerate(instance_groups):
            for instance in instance_group.instances():
                (situation, dependency_tree, perception) = instance
                if not isinstance(situation, HighLevelSemanticsSituation):
                    raise RuntimeError(
                        f"Expected the Situation to be HighLevelSemanticsSituation got {type(situation)}"
                    )
                if not isinstance(dependency_tree, LinearizedDependencyTree):
                    raise RuntimeError(
                        f"Expected the Linguistics to be LinearizedDependencyTree got {type(dependency_tree)}"
                    )
                if not (
                    isinstance(perception, PerceptualRepresentation)
                    and isinstance(
                        perception.frames[0], DevelopmentalPrimitivePerceptionFrame
                    )
                ):
                    raise RuntimeError(
                        f"Expected the Perceptual Representation to contain DevelopmentalPrimitivePerceptionFrame got "
                        f"{type(perception.frames)}"
                    )
                (_, speaker) = self.situation_text(situation)
                length = len(self._linguistic_text(dependency_tree, speaker).split())
                all_instances.append((situation, dependency_tree, perception, length))

        # shuffle
        random.seed(random_seed)
        random.shuffle(all_instances)

        # sort
        all_instances.sort(key=lambda instance: instance[3])

        rendered_instances = []
        for (situation, dependency_tree, perception, _) in all_instances:
            (situation_text, speaker) = self.situation_text(situation)
            rendered_instances.append(
                InstanceHolder(
                    situation=situation_text,
                    lingustics=self._linguistic_text(dependency_tree, speaker),
                    perception=self.perception_text(perception),
                )
            )

        filename = f"{curriculum_string}-curriculum-sorted-by-utterance.html"
        chunk_size = 50
        files_written: List[Tuple[str, str]] = []
        for i in range(0,
                       len(rendered_instances), chunk_size):
            chunk = rendered_instances[i : i + chunk_size]
            instance_group_header = f"{int(i / chunk_size):03} - {filename}"
            relative_filename = f"{instance_group_header}"
            print(relative_filename)
            files_written.append((instance_group_header, relative_filename))
            with open(
                output_directory / relative_filename, "w", encoding="utf-8"
            ) as html_out:
                html_out.write(f"<head>\n\t<style>{CSS}\n\t</style>\n</head>")
                html_out.write(f"\n<body>\n\t<h1>{title} - {curriculum_string}</h1>")
                html_out.write("\t<a href='index.html'> Back to Index</a>")
                html_out.write(EXPLANATION_HEADER.format(date=date.today()))
                for (instance_number, instance_holder) in enumerate(immutableset(chunk)):
                    # By using the immutable set we guarantee iteration order and remove duplicates
                    html_out.write(
                        f"\n\t<table>\n"
                        f"\t\t<thead>\n"
                        f"\t\t\t<tr>\n"
                        f'\t\t\t\t<th colspan="3">\n'
                        f"\t\t\t\t\t<h2>Scene {instance_number}</h2>\n"
                        f"\t\t\t\t</th>\n\t\t\t</tr>\n"
                        f"\t\t</thead>\n"
                        f"\t\t<tbody>\n"
                        f"\t\t\t<tr>\n"
                        f"\t\t\t\t<td>\n"
                        f'\t\t\t\t\t<h3 id="situation-{instance_number}">Situation</h3>\n'
                        f"\t\t\t\t</td>\n"
                        f"\t\t\t\t<td>\n"
                        f'\t\t\t\t\t<h3 id="linguistic-{instance_number}">Language</h3>\n'
                        f"\t\t\t\t</td>\n"
                        f"\t\t\t\t<td>\n"
                        f'\t\t\t\t\t<h3 id="perception-{instance_number}">Learner Perception</h3>\n'
                        f"\t\t\t\t</td>\n"
                        f"\t\t\t</tr>\n"
                        f"\t\t\t<tr>\n"
                        f'\t\t\t\t<td valign="top">{instance_holder.situation}\n\t\t\t\t</td>\n'
                        f'\t\t\t\t<td valign="top">{instance_holder.lingustics}</td>\n'
                        f'\t\t\t\t<td valign="top">{instance_holder.perception}\n\t\t\t\t</td>\n'
                        f"\t\t\t</tr>\n\t\t</tbody>\n\t</table>"
                    )
                html_out.write("\n</body>")
                html_out.write("\t<a href='index.html'> Back to Index</a>")

        with open(
            str(output_directory / "index.html"), "w", encoding="utf-8"
        ) as index_out:
            index_out.write(f"<head><title>{title}</title></head><body>")
            index_out.write("<ul>")
            for (
                instance_group_title,
                instance_group_dump_file_relative_path,
            ) in files_written:
                index_out.write(
                    f"\t<li><a href='{instance_group_dump_file_relative_path}'>"
                    f"{instance_group_title}</a></li>"
                )
            index_out.write("</ul>")
            index_out.write("</body>")

    def dump_to_html(
        self,
        instance_groups: Iterable[
            InstanceGroup[
                HighLevelSemanticsSituation,
                LinearizedDependencyTree,
                DevelopmentalPrimitivePerceptionFrame,
            ]
        ],
        *,
        output_directory: Path,
        title: str,
    ):
        r"""
        Method to take a list of `InstanceGroup`\ s and turn each one into an individual page

        Given a list of `InstanceGroup`\ s and an output directory of *output_directory*
        along with a *title* for the pages the generator loops through each group
        and calls the internal method to create HTML pages.
        """
        # first nuke the output directory
        # we check it only contains HTML files for safety
        if output_directory.exists():
            for f in output_directory.iterdir():
                if f.suffix != ".html":
                    raise RuntimeError(
                        f"Output directory does not appear to be a curriculum "
                        f"dump. It contains the non-html file {f}"
                    )
            shutil.rmtree(str(output_directory))
        output_directory.mkdir(parents=True, exist_ok=True)

        files_written: List[Tuple[str, str]] = []
        # write each instance group to its own file
        for (idx, instance_group) in enumerate(instance_groups):
            instance_group_header = f"{idx:03} - {instance_group.name()}"
            # not absolute because when we use this to make links in index.html,
            # we don't want them to break if the user moves the directory.
            relative_filename = f"{instance_group_header}.html"
            files_written.append((instance_group_header, relative_filename))
            self._dump_instance_group(
                instance_group=instance_group,
                output_destination=output_directory / relative_filename,
                title=f"{instance_group_header} - {title}",
            )

        # write a table of contents to index.html
        with open(output_directory / "index.html", "w", encoding="utf-8") as index_out:
            index_out.write(f"<head><title>{title}</title></head><body>")
            index_out.write("<ul>")
            for (
                instance_group_title,
                instance_group_dump_file_relative_path,
            ) in files_written:
                index_out.write(
                    f"\t<li><a href='{instance_group_dump_file_relative_path}'>"
                    f"{instance_group_title}</a></li>"
                )
            index_out.write("</ul>")
            index_out.write("</body>")

    def _dump_instance_group(
        self,
        instance_group: InstanceGroup[
            HighLevelSemanticsSituation,
            LinearizedDependencyTree,
            DevelopmentalPrimitivePerceptionFrame,
        ],
        title: str,
        output_destination: Path,
    ):
        """
        Internal generation method for individual instance groups into HTML pages

        Given an `InstanceGroup` with a `HighLevelSemanticsSituation`,
        `LinearizedDependencyTree`, and `DevelopmentalPrimitivePerceptionFrame` this function
        creates an html page at the given *output_destination* and *title*. Any existing file
        at *output_destination* is overwritten. Each page renders an instance group with each
        "instance" as an individual section on the page.
        """
        # PreRender Instances so we can remove duplicates by converting to an immutable set
        rendered_instances = []
        for (situation, dependency_tree, perception) in instance_group.instances():
            if not isinstance(situation, HighLevelSemanticsSituation):
                raise RuntimeError(
                    f"Expected the Situation to be HighLevelSemanticsSituation got {type(situation)}"
                )
            if not (
                isinstance(dependency_tree, LinearizedDependencyTree)
                or isinstance(dependency_tree, TokenSequenceLinguisticDescription)
            ):
                raise RuntimeError(
                    f"Expected the Linguistics to be LinearizedDependencyTree or TokenSequenceLinguisticDescription got {type(dependency_tree)}"
                )
            if not (
                isinstance(perception, PerceptualRepresentation)
                and isinstance(
                    perception.frames[0], DevelopmentalPrimitivePerceptionFrame
                )
            ):
                raise RuntimeError(
                    f"Expected the Perceptual Representation to contain DevelopmentalPrimitivePerceptionFrame got "
                    f"{type(perception.frames)}"
                )
            (situation_text, speaker) = self.situation_text(situation)
            rendered_instances.append(
                InstanceHolder(
                    situation=situation_text,
                    lingustics=self._linguistic_text(dependency_tree, speaker),
                    perception=self.perception_text(perception),
                )
            )

        with open(output_destination, "w", encoding="utf-8") as html_out:
            html_out.write(f"<head>\n\t<style>{CSS}\n\t</style>\n</head>")
            html_out.write(f"\n<body>\n\t<h1>{title}</h1>")
            html_out.write("\t<a href='index.html'> Back to Index</a>")
            html_out.write(EXPLANATION_HEADER.format(date=date.today()))
            # By using the immutable set we guarantee iteration order and remove duplicates
            for (instance_number, instance_holder) in enumerate(
                immutableset(rendered_instances)
            ):
                html_out.write(
                    f"\n\t<table>\n"
                    f"\t\t<thead>\n"
                    f"\t\t\t<tr>\n"
                    f'\t\t\t\t<th colspan="3">\n'
                    f"\t\t\t\t\t<h2>Scene {instance_number}</h2>\n"
                    f"\t\t\t\t</th>\n\t\t\t</tr>\n"
                    f"\t\t</thead>\n"
                    f"\t\t<tbody>\n"
                    f"\t\t\t<tr>\n"
                    f"\t\t\t\t<td>\n"
                    f'\t\t\t\t\t<h3 id="situation-{instance_number}">Situation</h3>\n'
                    f"\t\t\t\t</td>\n"
                    f"\t\t\t\t<td>\n"
                    f'\t\t\t\t\t<h3 id="linguistic-{instance_number}">Language</h3>\n'
                    f"\t\t\t\t</td>\n"
                    f"\t\t\t\t<td>\n"
                    f'\t\t\t\t\t<h3 id="perception-{instance_number}">Learner 
Perception</h3>\n' f"\t\t\t\t</td>\n" f"\t\t\t</tr>\n" f"\t\t\t<tr>\n" f'\t\t\t\t<td valign="top">{instance_holder.situation}\n\t\t\t\t</td>\n' f'\t\t\t\t<td valign="top">{instance_holder.lingustics}</td>\n' f'\t\t\t\t<td valign="top">{instance_holder.perception}\n\t\t\t\t</td>\n' f"\t\t\t</tr>\n\t\t</tbody>\n\t</table>" ) html_out.write("\t<a href='index.html'> Back to Index</a>") html_out.write("\n</body>") def situation_text( self, situation: HighLevelSemanticsSituation ) -> Tuple[str, Optional[SituationObject]]: """ Converts a situation description into its sub-parts as a table entry """ speaker = None output_text = ["\n\t\t\t\t\t<h4>Objects</h4>\n\t\t\t\t\t<ul>"] seen_handles_to_next_index: Dict[str, int] = {} situation_obj_to_handle: Dict[SituationObject, str] = {} for obj in situation.all_objects: handle: str if obj.ontology_node.handle in seen_handles_to_next_index: handle = ( obj.ontology_node.handle + "_" + str(seen_handles_to_next_index[obj.ontology_node.handle]) ) seen_handles_to_next_index[obj.ontology_node.handle] += 1 else: handle = obj.ontology_node.handle + "_0" seen_handles_to_next_index[obj.ontology_node.handle] = 1 property_string: str prop_strings = [] if obj.properties: for prop in obj.properties: prop_strings.append(prop.handle) if prop == IS_SPEAKER: speaker = obj property_string = "[" + ",".join(prop_strings) + "]" else: property_string = "" output_text.append(f"\t\t\t\t\t\t<li>{handle}{property_string}</li>") situation_obj_to_handle[obj] = handle output_text.append("\t\t\t\t\t</ul>") if situation.actions: output_text.append("\t\t\t\t\t<h4>Actions</h4>\n\t\t\t\t\t<ul>") for acts in situation.actions: output_text.append( f"\t\t\t\t\t\t<li>{acts.action_type.handle}</li>\n\t\t\t\t\t<ul>" ) for mapping in acts.argument_roles_to_fillers.keys(): for object_ in acts.argument_roles_to_fillers[mapping]: output_text.append( f"\t\t\t\t\t\t<li>{mapping.handle} is {self._situation_object_or_region_text(object_, situation_obj_to_handle)}</li>" ) for mapping in acts.auxiliary_variable_bindings.keys(): output_text.append( f"\t\t\t\t\t\t<li>{mapping.debug_handle} is {self._situation_object_or_region_text(acts.auxiliary_variable_bindings[mapping], situation_obj_to_handle)}" ) output_text.append("\t\t\t\t\t</ul>") if situation.always_relations: output_text.append("\t\t\t\t\t<h4>Relations</h4>\n\t\t\t\t\t<ul>") for rel in situation.always_relations: output_text.append( f"\t\t\t\t\t\t<li>{rel.relation_type.handle}({situation_obj_to_handle[rel.first_slot]}," f"{self._situation_object_or_region_text(rel.second_slot, situation_obj_to_handle)})</li>" ) output_text.append("\t\t\t\t\t</ul>") if situation.syntax_hints: output_text.append("\t\t\t\t\t<h4>Syntax Hints</h4>\n\t\t\t\t\t<ul>") for hint in situation.syntax_hints: output_text.append(f"\t\t\t\t\t\t<li>{hint}</li>") output_text.append("\t\t\t\t\t</ul>") return ("\n".join(output_text), speaker) def _situation_object_or_region_text( self, obj_or_region: Union[SituationObject, SituationRegion], obj_to_handle: Dict[SituationObject, str], ) -> str: def _direction(direction: Direction[SituationObject]) -> str: polarity = "+" if direction.positive else "-" axes_function = ( direction.relative_to_axis if isinstance(direction.relative_to_axis, _GravitationalAxis) else direction.relative_to_axis.__repr__( # type: ignore object_map=obj_to_handle ) ) return f"{polarity}{axes_function}" if isinstance(obj_or_region, SituationObject): return obj_to_handle[obj_or_region] else: parts = [] parts.append( 
f"reference_object={obj_to_handle[obj_or_region.reference_object]}" ) if obj_or_region.distance: parts.append(f"distance={obj_or_region.distance.name}") if obj_or_region.direction: parts.append(f"direction={_direction(obj_or_region.direction)}") return "Region(" + ", ".join(parts) + ")" _opposite_size_relations: Dict[OntologyNode, OntologyNode] = { SMALLER_THAN: BIGGER_THAN, BIGGER_THAN: SMALLER_THAN, MUCH_SMALLER_THAN: MUCH_BIGGER_THAN, MUCH_BIGGER_THAN: MUCH_SMALLER_THAN, } # Collapse pairs of size relations (biggerThan/smallerThan) into # a single relation def _get_single_size_relation( self, relation: Relation[Any], relation_set: ImmutableSet[Relation[Any]] ): single_size_relation: Optional[Tuple[Any, str, Any]] = None if relation.relation_type in self._opposite_size_relations: if ( Relation( self._opposite_size_relations[relation.relation_type], relation.second_slot, relation.first_slot, ) in relation_set ): if relation.relation_type == SMALLER_THAN: single_size_relation = ( relation.second_slot, ">", relation.first_slot, ) elif relation.relation_type == BIGGER_THAN: single_size_relation = ( relation.first_slot, ">", relation.second_slot, ) elif relation.relation_type == MUCH_SMALLER_THAN: single_size_relation = ( relation.second_slot, ">>", relation.first_slot, ) else: single_size_relation = ( relation.first_slot, ">>", relation.second_slot, ) return single_size_relation def perception_text( self, perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame] ) -> str: """ Turns a perception into a list of items in the perceptions frames. """ output_text: List[str] = [] check_state( len(perception.frames) in (1, 2), "Only know how to handle 1 or 2 frame " "perceptions for now", ) perception_is_dynamic = len(perception.frames) > 1 # first, we build an index of objects to their properties. # This will be used so that when we list the objects, # we can easily list their properties in brackets right after them. def extract_subject(prop: PropertyPerception) -> ObjectPerception: return prop.perceived_object first_frame_properties = _index_to_setmultidict( perception.frames[0].property_assertions, extract_subject ) second_frame_properties = ( _index_to_setmultidict( perception.frames[1].property_assertions, extract_subject ) if perception_is_dynamic else immutablesetmultidict() ) # Next, we determine what objects persist between both frames # and which do not. first_frame_objects = perception.frames[0].perceived_objects second_frame_objects = ( perception.frames[1].perceived_objects if perception_is_dynamic else immutableset() ) static_objects = ( first_frame_objects.intersection(second_frame_objects) if perception_is_dynamic else first_frame_objects ) all_objects = first_frame_objects.union(second_frame_objects) # For objects, properties, and relations we will use arrows to indicate # when something beings or ceased to exist between frames. # Since the logic will be the same for all three types, # we pull it out into a function. def compute_arrow( item: Any, static_items: AbstractSet[Any], first_frame_items: AbstractSet[Any] ) -> Tuple[str, str]: if item in static_items: # item doesn't change - no arrow return ("", "") elif item in first_frame_items: # item ceases to exist return ("", " ---> Ø") else: # item beings to exist in the second frame return ("Ø ---> ", "") # the logic for rendering objects, which will be used in the loop below. # This needs to be an inner function so it can access the frame property maps, etc. 
def render_object(obj: ObjectPerception) -> str: obj_text = f"<i>{obj.debug_handle}</i>" first_frame_obj_properties = first_frame_properties[obj] second_frame_obj_properties = second_frame_properties[obj] static_properties = ( second_frame_obj_properties.intersection(first_frame_obj_properties) if second_frame_obj_properties else first_frame_obj_properties ) # logic for rendering properties, for use in the loop below. def render_property(prop: PropertyPerception) -> str: (prop_prefix, prop_suffix) = compute_arrow( prop, static_properties, first_frame_obj_properties ) prop_string: str if isinstance(prop, HasColor): prop_string = ( f'<span style="background-color: {prop.color}; ' f'color: {prop.color.inverse()}; border: 1px solid black;">' f"color={prop.color.hex}</span>" ) elif isinstance(prop, HasBinaryProperty): prop_string = prop.binary_property.handle else: raise RuntimeError(f"Cannot render property: {prop}") return f"{prop_prefix}{prop_string}{prop_suffix}" all_properties: ImmutableSet[PropertyPerception] = immutableset( flatten([first_frame_obj_properties, second_frame_obj_properties]) ) prop_strings = [render_property(prop) for prop in all_properties] if prop_strings: return f"{obj_text}[{"; ".join(prop_strings)}]" else: return obj_text # Here we process the relations between the two scenes to determine all relations. # This has to be done before rending objects so we can use the PART_OF relation to order # the objects. first_frame_relations = perception.frames[0].relations second_frame_relations = ( perception.frames[1].relations if perception_is_dynamic else immutableset() ) static_relations = ( second_frame_relations.intersection(first_frame_relations) if perception_is_dynamic else first_frame_relations ) all_relations = first_frame_relations.union(second_frame_relations) # Here we add the perceived objects to a NetworkX DiGraph with PART_OF relations being the # edges between objects. This allows us to do pre-order traversal of the Graph to make a # nested <ul></ul> for the objects rather than a flat list. graph = DiGraph() root = ObjectPerception("root", axes=WORLD_AXES) graph.add_node(root) expressed_relations = set() axis_to_object: Dict[GeonAxis, ObjectPerception] = {} for object_ in all_objects: graph.add_node(object_) graph.add_edge(root, object_) for axis in object_.axes.all_axes: axis_to_object[axis] = object_ for relation_ in all_relations: if relation_.relation_type == PART_OF: graph.add_edge(relation_.second_slot, relation_.first_slot) if graph.has_edge(root, relation_.first_slot): graph.remove_edge(root, relation_.first_slot) expressed_relations.add(relation_) # Next, we render objects, together with their properties, using preorder DFS Traversal # We also add in `In Region` relationships at this step for objects which have them. output_text.append("\n\t\t\t\t\t<h5>Perceived Objects</h5>\n\t\t\t\t\t<ul>") visited = set() region_relations = immutableset( region for region in all_relations if region.relation_type == IN_REGION ) # This loop doesn't quite get the tab spacing right. It could at the cost of increased # complexity. Would need to track the "depth" we are currently at. 
axis_info = perception.frames[0].axis_info def dfs_walk(node, depth=0): visited.add(node) if not node == root: (obj_prefix, obj_suffix) = compute_arrow( node, static_objects, first_frame_objects ) output_text.append( "\t" * (6 + depth) + f"<li>{obj_prefix}{render_object(node)}{obj_suffix}<ul>" ) if node.geon: output_text.append( f"\t\t\t\t\t\t<li>Geon: {self._render_geon(node.geon, indent_dept=7)}</li>" ) # Handle Region Relations for region_relation in region_relations: if region_relation.first_slot == node: (relation_prefix, relation_suffix) = compute_arrow( region_relation, static_relations, first_frame_relations ) relation_str = self._render_relation(axis_info, region_relation) output_text.append( f"\t\t\t\t\t\t<li>{relation_prefix}" f"{relation_str}{relation_suffix}</li>" ) expressed_relations.add(region_relation) for succ in graph.successors(node): if succ not in visited: depth = depth + 6 dfs_walk(succ, depth) depth = depth - 6 output_text.append("\t" * (6 + depth) + "</ul></li>") dfs_walk(root) output_text.append("\t\t\t\t\t</ul>") # Finally we render remaining relations between objects remaining_relations = immutableset( relation for relation in all_relations if relation not in expressed_relations ) if remaining_relations: output_text.append("\t\t\t\t\t<h5>Other Relations</h5>\n\t\t\t\t\t<ul>") for relation in remaining_relations: (relation_prefix, relation_suffix) = compute_arrow( relation, static_relations, first_frame_relations ) single_size_relation: Optional[ Tuple[Any, str, Any] ] = self._get_single_size_relation(relation, all_relations) if single_size_relation: relation_text = f"{single_size_relation[0]} {single_size_relation[1]} {single_size_relation[2]}" size_output = f"\t\t\t\t\t\t<li>{relation_prefix}{relation_text}{relation_suffix}</li>" if size_output not in output_text: output_text.append(size_output) else: output_text.append( f"\t\t\t\t\t\t<li>{relation_prefix}{relation}{relation_suffix}</li>" ) output_text.append("\t\t\t\t\t</ul>") if perception.during: output_text.append("\t\t\t\t\t<h5>During the action</h5>") output_text.append(self._render_during(perception.during, indent_depth=5)) if axis_info and axis_info.axes_facing: output_text.append(("\t\t\t\t\t<h5>Axis Facings</h5>")) output_text.append(("\t\t\t\t\t<ul>")) for object_ in axis_info.axes_facing: output_text.append( f"\t\t\t\t\t\t<li>{object_.debug_handle} faced by:\n\t\t\t\t\t\t<ul>" ) for axis in axis_info.axes_facing[object_]: output_text.append( f"\t\t\t\t\t\t\t<li>{axis} possessed by {axis_to_object[axis]}</li>" ) output_text.append("\t\t\t\t\t\t</ul>") output_text.append("\t\t\t\t\t</ul>") return "\n".join(output_text) def _render_relation( self, axis_info: AxesInfo[ObjectPerception], relation: Relation[ObjectPerception] ) -> str: second_slot_str: str filler2 = relation.second_slot if isinstance(filler2, Region): parts = [str(filler2.reference_object)] if filler2.distance: parts.append(f"distance={filler2.distance}") if filler2.direction: parts.append( f"direction={sign(filler2.direction.positive)}" f"{filler2.direction.relative_to_concrete_axis(axis_info)}" ) second_slot_str = f"Region({",".join(parts)})" else: second_slot_str = str(filler2) return f"{relation.relation_type}({relation.first_slot}, {second_slot_str})" def _render_during( self, during: DuringAction[ObjectPerception], *, indent_depth: int = 0 ) -> str: indent = "\t" * indent_depth lines = [f"{indent}<ul>"] if during.objects_to_paths: lines.append(f"{indent}\t<li><b>Paths:</b>") lines.append(f"{indent}\t<ul>") for (object_, path) in 
during.objects_to_paths.items(): path_rendering = self._render_path(path, indent_depth=indent_depth + 2) lines.append(f"{indent}\t\t<li>{object_}: {path_rendering}</li></ul>") lines.append(f"{indent}</ul></li>") if during.continuously: lines.append(f"{indent}\t<li><b>Relations which hold continuously:</b>") lines.append(f"{indent}\t<ul>") for relation in during.continuously: lines.append(f"{indent}\t\t<li>{relation}</li>") lines.append(f"{indent}</ul></li>") if during.at_some_point: lines.append(f"{indent}\t<li><b>Relations which hold at some point:</b>") lines.append(f"{indent}\t<ul>") for relation in during.at_some_point: lines.append(f"{indent}\t\t<li>{relation}</li>") lines.append(f"{indent}</ul></li>") lines.append(f"{indent}</ul>") return "\n".join(lines) def _render_path( self, path: SpatialPath[ObjectPerception], *, indent_depth: int = 0 ) -> str: indent = "\t" * indent_depth lines = [f"{indent}<ul>"] lines.append(f"{indent}\t<li>") lines.append(str(path)) lines.append(f"{indent}\t</li>") return "\n".join(lines) def _linguistic_text( self, linguistic: LinearizedDependencyTree, speaker: Optional[SituationObject] ) -> str: """ Parses the Linguistic Description of a Linearized Dependency Tree into a table entry Takes a `LinearizedDependencyTree` which is turned into a token sequence and phrased as a sentence for display. Returns a List[str] """ if speaker: return ( f'{speaker.ontology_node.handle} says: "' + " ".join(linguistic.as_token_sequence()) + '"' ) else: return " ".join(linguistic.as_token_sequence()) def _render_geon(self, geon: Geon, *, indent_dept: int = 0) -> str: indent = "\t" * indent_dept lines = [f"{indent}<ul>"] lines.append( f"{indent}\t<li>Cross Section: {geon.cross_section} | Cross Section Size: {geon.cross_section_size}</li>" ) if geon.generating_axis == geon.axes.primary_axis: lines.append( f"{indent}\t<li><b>Generating Axis: {geon.generating_axis}</b></li>" ) else: lines.append(f"{indent}\t<li>Generating Axis: {geon.generating_axis}</li>") if geon.axes.orienting_axes: lines.append(f"{indent}\t<li>Orienting Axes:") lines.append(f"{indent}\t<ul>") for axis in geon.axes.orienting_axes: if axis == geon.axes.primary_axis: lines.append(f"{indent}\t\t<li><b>{axis}</b></li>") else: lines.append(f"{indent}\t\t<li>{axis}</li>") lines.append(f"{indent}\t</ul>") lines.append(f"{indent}\t</li>") if geon.axes.axis_relations: lines.append(f"{indent}\t<li>Axes Relations:") lines.append(f"{indent}\t<ul>") for axis_relation in geon.axes.axis_relations: single_size_relation: Optional[ Tuple[Any, str, Any] ] = self._get_single_size_relation( axis_relation, geon.axes.axis_relations ) if single_size_relation: size_relation_text = f"{indent}\t\t<li>{single_size_relation[0].debug_name} {single_size_relation[1]} {single_size_relation[2].debug_name}</li>" if size_relation_text not in lines: lines.append(size_relation_text) elif isinstance(axis_relation.second_slot, Region): lines.append( f"{indent}\t\t<li>{axis_relation.relation_type}({axis_relation.first_slot.debug_name},{axis_relation.second_slot})</li>" ) else: lines.append( f"{indent}\t\t<li>{axis_relation.relation_type}({axis_relation.first_slot.debug_name},{axis_relation.second_slot.debug_name})</li>" ) lines.append(f"{indent}\t</ul>") lines.append(f"{indent}\t</li>") lines.append(f"{indent}</ul>") return "\n".join(lines) CSS = """ body { font-size: 1em; font-family: sans-serif; } table td { p padding: 1em; background-color: #FAE5D3 ; } """ _KT = TypeVar("_KT") _VT = TypeVar("_VT") def _index_to_setmultidict( items: Iterable[_VT], 
index_func: Callable[[_VT], _KT] ) -> ImmutableSetMultiDict[_KT, _VT]: return immutablesetmultidict((index_func(x), x) for x in items) if __name__ == "__main__": parameters_only_entry_point(main, usage_message=USAGE_MESSAGE)
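
# A minimal usage sketch (not part of the original module; the demo function name
# is hypothetical) showing how the `_index_to_setmultidict` helper above groups
# items under a key function. It assumes only that `immutablecollections` is
# installed; the word list is made-up illustration data.
def _demo_index_to_setmultidict() -> None:
    words = ["ball", "box", "cup", "chair", "ball"]  # duplicate "ball" collapses
    by_first_letter = _index_to_setmultidict(words, lambda word: word[0])
    # Values sharing a key land in the same immutable set:
    print(sorted(by_first_letter["b"]))  # ['ball', 'box']
    print(sorted(by_first_letter["c"]))  # ['chair', 'cup']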
""" Manipulation of EPI data. .. testsetup:: >>> tmpdir = getfixture('tmpdir') >>> tmp = tmpdir.chdir() # changing to a temporary directory >>> nb.Nifti1Image(np.zeros((90, 90, 60)), None, None).to_filename( ... tmpdir.join('epi.nii.gz').strpath) """ def get_trt(in_meta, in_file=None): r""" Obtain the *total readout time* :math:`T_\text{ro}` from available metadata. BIDS provides two standard mechanisms to store the total readout time, :math:`T_\text{ro}`, of :abbr:`EPI (echo-planar imaging)` scans. The first option is that a ``TotalReadoutTime`` field is found in the JSON sidecar: >>> meta = {'TotalReadoutTime': 0.05251} >>> get_trt(meta) 0.05251 Alternatively, the *effective echo spacing* :math:`t_\text{ees}` (``EffectiveEchoSpacing`` BIDS field) may be provided. Then, the total readout time :math:`T_\text{ro}` can be calculated as follows: .. math :: T_\text{ro} = t_\text{ees} \cdot (N_\text{PE} - 1), \label{eq:rotime-ees}\tag{1} where :math:`N_\text{PE}` is the number of pixels along the :abbr:`PE (phase-encoding)` direction **on the reconstructed matrix**. >>> meta = {'EffectiveEchoSpacing': 0.00059, ... 'PhaseEncodingDirection': 'j-'} >>> f"{get_trt(meta, in_file="epi.nii.gz"):g}" '0.05251' Using nonstandard metadata, there are further options. If the *echo spacing* :math:`t_\text{es}` (do not confuse with the *effective echo spacing*, :math:`t_\text{ees}`) is set and the parallel acceleration factor (:abbr:`GRAPPA (GeneRalized Auto-calibrating Partial Parallel Acquisition)`, :abbr:`ARC (Auto-calibrating Reconstruction for Cartesian imaging)`, etc.) of the EPI :math:`f_\text{acc}` is known, then it is possible to calculate the readout time as: .. math :: T_\text{ro} = t_\text{es} \cdot (\left\lfloor\frac{N_\text{PE}}{f_\text{acc}} \right\rfloor - 1). >>> meta = {'EchoSpacing': 0.00119341, ... 'PhaseEncodingDirection': 'j-', ... 'ParallelReductionFactorInPlane': 2} >>> f"{get_trt(meta, in_file="epi.nii.gz"):g}" '0.05251' .. caution:: Philips stores different parameter names, and there has been quite a bit of reverse-engineering and discussion around how to get the total readout-time right for the vendor. The implementation done here follows the findings of Dr. Rorden, summarized in `this post <https://github.com/rordenlab/dcm2niix/issues/377#issuecomment-598685590>`__. It seems to be possible to calculate the **effective** echo spacing (in seconds) as: .. math :: t_\text{ees} = \frac{f_\text{wfs}} {B_0 \gamma \Delta_\text{w/f} \cdot (f_\text{EPI} + 1)}, \label{eq:philips-ees}\tag{2} where :math:`f_\text{wfs}` is the water-fat-shift in pixels, :math:`B_0` is the field strength in T, :math:`\gamma` is the gyromagnetic ratio, :math:`\Delta_\text{w/f}` is the water/fat difference in ppm and :math:`f_\text{EPI}` is Philip's «*EPI factor*,» which accounts for in-plane acceleration with :abbr:`SENSE (SENSitivity Encoding)`. The problem with Philip's «*EPI factor*» is that it is absolutely necessary to calculate the effective echo spacing, because the reported SENSE acceleration factor does not allow to calculate the effective train length from the reconstructed matrix size along the PE direction (neither from the acquisition matrix size if it is strangely found stored within the metadata). For :math:`B_0 = 3.0` [T], then :math:`B_0 \gamma \Delta_\text{w/f} \approx 434.215`, as in `early discussions held on the FSL listserv <https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;162ab1a3.1308>`__. As per Dr. Rorden, Eq. :math:`\eqref{eq:philips-ees}` is equivalent to the following formulation: .. 
math :: t_\text{ees} = \frac{f_\text{wfs}} {3.4 \cdot F_\text{img} \cdot (f_\text{EPI} + 1)}, where :math:`F_\text{img}` is the «*Imaging Frequency*» in MHz, as reported by the Philips console. This second formulation seems to be preferred for the better accuracy of the Imaging Frequency field over the Magnetic field strength. Once the effective echo spacing is obtained, the total readout time can then be calculated with Eq. :math:`\eqref{eq:rotime-ees}`. >>> meta = {'WaterFatShift': 9.2227266, ... 'EPIFactor': 35, ... 'ImagingFrequency': 127.7325, ... 'PhaseEncodingDirection': 'j-'} >>> f"{get_trt(meta, in_file="epi.nii.gz"):0.5f}" '0.05251' >>> meta = {'WaterFatShift': 9.2227266, ... 'EPIFactor': 35, ... 'MagneticFieldStrength': 3, ... 'PhaseEncodingDirection': 'j-'} >>> f"{get_trt(meta, in_file="epi.nii.gz"):0.5f}" '0.05251' If enough metadata is not available, raise an error: >>> get_trt({'PhaseEncodingDirection': 'j-'}, ... in_file='epi.nii.gz') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ValueError: .. admonition:: Thanks With thanks to Dr. Rorden for his thourough `assessment <https://github.com/rordenlab/dcm2niix/issues/377>`__ and `validation <https://osf.io/9ucek/>`__ on the matter, and to Pravesh Parekh for `his wonderful review on NeuroStars <https://neurostars.org/t/consolidating-epi-echo-spacing-and-readout-time-for-philips-scanner/4406>`__. .. admonition:: See Also Some useful links regarding the calculation of the readout time for Philips: * `Brain Voyager documentation <https://support.brainvoyager.com/brainvoyager/functional-analysis-preparation/29-pre-processing/78-epi-distortion-correction-echo-spacing-and-bandwidth>`__ -- Please note that I (OE) *believe* the statement about the effective echo-spacing on Philips **is wrong**, as the EPI factor should account for the in-plane acceleration. * `Disappeared documentation of the Spinoza Center <https://web.archive.org/web/20130420035502/www.spinozacentre.nl/wiki/index.php/NeuroWiki:Current_developments>`__. * This `guide for preprocessing of EPI data <https://osf.io/hks7x/>`__. """ import nibabel as nb # Use case 1: TRT is defined if "TotalReadoutTime" in in_meta: trt = in_meta.get("TotalReadoutTime") if not trt: raise ValueError(f"'{trt}'") return trt # npe = N voxels PE direction pe_index = "ijk".index(in_meta["PhaseEncodingDirection"][0]) npe = nb.load(in_file).shape[pe_index] # Use case 2: EES is defined ees = in_meta.get("EffectiveEchoSpacing") if ees: # Effective echo spacing means that acceleration factors have been accounted for. 
return ees * (npe - 1) try: echospacing = in_meta["EchoSpacing"] acc_factor = in_meta["ParallelReductionFactorInPlane"] except KeyError: pass else: # etl = effective train length etl = npe // acc_factor return echospacing * (etl - 1) # Use case 3 (Philips scans) try: wfs = in_meta["WaterFatShift"] epifactor = in_meta["EPIFactor"] except KeyError: pass else: wfs_hz = ( (in_meta.get("ImagingFrequency", 0) * 3.39941) or (in_meta.get("MagneticFieldStrength", 0) * 144.7383333) or None ) if wfs_hz: ees = wfs / (wfs_hz * (epifactor + 1)) return ees * (npe - 1) raise ValueError("Unknown total-readout time specification") def epi_mask(in_file, out_file=None): """Use grayscale morphological operations to obtain a quick mask of EPI data.""" from pathlib import Path import nibabel as nb import numpy as np from scipy import ndimage from skimage.morphology import ball if out_file is None: out_file = Path("mask.nii.gz").absolute() img = nb.load(in_file) data = img.get_fdata(dtype="float32") # First open to blur out the skull around the brain opened = ndimage.grey_opening(data, structure=ball(3)) # Second, close large vessels and the ventricles closed = ndimage.grey_closing(opened, structure=ball(2)) # Window filter on percentile 30 closed -= np.percentile(closed, 30) # Window filter on percentile 90 of data maxnorm = np.percentile(closed[closed > 0], 90) closed = np.clip(closed, a_min=0.0, a_max=maxnorm) # Calculate index of center of masses cm = tuple(np.round(ndimage.measurements.center_of_mass(closed)).astype(int)) # Erode the picture of the brain by a lot eroded = ndimage.grey_erosion(closed, structure=ball(5)) # Calculate the residual wshed = opened - eroded wshed -= wshed.min() wshed = np.round(1e3 * wshed / wshed.max()).astype(np.uint16) markers = np.zeros_like(wshed, dtype=int) markers[cm] = 2 markers[0, 0, -1] = -1 # Run watershed labels = ndimage.watershed_ift(wshed, markers) hdr = img.header.copy() hdr.set_data_dtype("uint8") nb.Nifti1Image( ndimage.binary_dilation(labels == 2, ball(2)).astype("uint8"), img.affine, hdr ).to_filename(out_file) return out_file
""" Manipulation of EPI data. .. testsetup:: >>> tmpdir = getfixture('tmpdir') >>> tmp = tmpdir.chdir() # changing to a temporary directory >>> nb.Nifti1Image(np.zeros((90, 90, 60)), None, None).to_filename( ... tmpdir.join('epi.nii.gz').strpath) """ def get_trt(in_meta, in_file=None): r""" Obtain the *total readout time* :math:`T_\text{ro}` from available metadata. BIDS provides two standard mechanisms to store the total readout time, :math:`T_\text{ro}`, of :abbr:`EPI (echo-planar imaging)` scans. The first option is that a ``TotalReadoutTime`` field is found in the JSON sidecar: >>> meta = {'TotalReadoutTime': 0.05251} >>> get_trt(meta) 0.05251 Alternatively, the *effective echo spacing* :math:`t_\text{ees}` (``EffectiveEchoSpacing`` BIDS field) may be provided. Then, the total readout time :math:`T_\text{ro}` can be calculated as follows: .. math :: T_\text{ro} = t_\text{ees} \cdot (N_\text{PE} - 1), \label{eq:rotime-ees}\tag{1} where :math:`N_\text{PE}` is the number of pixels along the :abbr:`PE (phase-encoding)` direction **on the reconstructed matrix**. >>> meta = {'EffectiveEchoSpacing': 0.00059, ... 'PhaseEncodingDirection': 'j-'} >>> f"{get_trt(meta, in_file='epi.nii.gz'):g}" '0.05251' Using nonstandard metadata, there are further options. If the *echo spacing* :math:`t_\text{es}` (do not confuse with the *effective echo spacing*, :math:`t_\text{ees}`) is set and the parallel acceleration factor (:abbr:`GRAPPA (GeneRalized Auto-calibrating Partial Parallel Acquisition)`, :abbr:`ARC (Auto-calibrating Reconstruction for Cartesian imaging)`, etc.) of the EPI :math:`f_\text{acc}` is known, then it is possible to calculate the readout time as: .. math :: T_\text{ro} = t_\text{es} \cdot (\left\lfloor\frac{N_\text{PE}}{f_\text{acc}} \right\rfloor - 1). >>> meta = {'EchoSpacing': 0.00119341, ... 'PhaseEncodingDirection': 'j-', ... 'ParallelReductionFactorInPlane': 2} >>> f"{get_trt(meta, in_file='epi.nii.gz'):g}" '0.05251' .. caution:: Philips stores different parameter names, and there has been quite a bit of reverse-engineering and discussion around how to get the total readout-time right for the vendor. The implementation done here follows the findings of Dr. Rorden, summarized in `this post <https://github.com/rordenlab/dcm2niix/issues/377#issuecomment-598685590>`__. It seems to be possible to calculate the **effective** echo spacing (in seconds) as: .. math :: t_\text{ees} = \frac{f_\text{wfs}} {B_0 \gamma \Delta_\text{w/f} \cdot (f_\text{EPI} + 1)}, \label{eq:philips-ees}\tag{2} where :math:`f_\text{wfs}` is the water-fat-shift in pixels, :math:`B_0` is the field strength in T, :math:`\gamma` is the gyromagnetic ratio, :math:`\Delta_\text{w/f}` is the water/fat difference in ppm and :math:`f_\text{EPI}` is Philip's «*EPI factor*,» which accounts for in-plane acceleration with :abbr:`SENSE (SENSitivity Encoding)`. The problem with Philip's «*EPI factor*» is that it is absolutely necessary to calculate the effective echo spacing, because the reported SENSE acceleration factor does not allow to calculate the effective train length from the reconstructed matrix size along the PE direction (neither from the acquisition matrix size if it is strangely found stored within the metadata). For :math:`B_0 = 3.0` [T], then :math:`B_0 \gamma \Delta_\text{w/f} \approx 434.215`, as in `early discussions held on the FSL listserv <https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;162ab1a3.1308>`__. As per Dr. Rorden, Eq. :math:`\eqref{eq:philips-ees}` is equivalent to the following formulation: .. 
math :: t_\text{ees} = \frac{f_\text{wfs}} {3.4 \cdot F_\text{img} \cdot (f_\text{EPI} + 1)}, where :math:`F_\text{img}` is the «*Imaging Frequency*» in MHz, as reported by the Philips console. This second formulation seems to be preferred for the better accuracy of the Imaging Frequency field over the Magnetic field strength. Once the effective echo spacing is obtained, the total readout time can then be calculated with Eq. :math:`\eqref{eq:rotime-ees}`. >>> meta = {'WaterFatShift': 9.2227266, ... 'EPIFactor': 35, ... 'ImagingFrequency': 127.7325, ... 'PhaseEncodingDirection': 'j-'} >>> f"{get_trt(meta, in_file='epi.nii.gz'):0.5f}" '0.05251' >>> meta = {'WaterFatShift': 9.2227266, ... 'EPIFactor': 35, ... 'MagneticFieldStrength': 3, ... 'PhaseEncodingDirection': 'j-'} >>> f"{get_trt(meta, in_file='epi.nii.gz'):0.5f}" '0.05251' If enough metadata is not available, raise an error: >>> get_trt({'PhaseEncodingDirection': 'j-'}, ... in_file='epi.nii.gz') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ValueError: .. admonition:: Thanks With thanks to Dr. Rorden for his thourough `assessment <https://github.com/rordenlab/dcm2niix/issues/377>`__ and `validation <https://osf.io/9ucek/>`__ on the matter, and to Pravesh Parekh for `his wonderful review on NeuroStars <https://neurostars.org/t/consolidating-epi-echo-spacing-and-readout-time-for-philips-scanner/4406>`__. .. admonition:: See Also Some useful links regarding the calculation of the readout time for Philips: * `Brain Voyager documentation <https://support.brainvoyager.com/brainvoyager/functional-analysis-preparation/29-pre-processing/78-epi-distortion-correction-echo-spacing-and-bandwidth>`__ -- Please note that I (OE) *believe* the statement about the effective echo-spacing on Philips **is wrong**, as the EPI factor should account for the in-plane acceleration. * `Disappeared documentation of the Spinoza Center <https://web.archive.org/web/20130420035502/www.spinozacentre.nl/wiki/index.php/NeuroWiki:Current_developments>`__. * This `guide for preprocessing of EPI data <https://osf.io/hks7x/>`__. """ import nibabel as nb # Use case 1: TRT is defined if "TotalReadoutTime" in in_meta: trt = in_meta.get("TotalReadoutTime") if not trt: raise ValueError(f"'{trt}'") return trt # npe = N voxels PE direction pe_index = "ijk".index(in_meta["PhaseEncodingDirection"][0]) npe = nb.load(in_file).shape[pe_index] # Use case 2: EES is defined ees = in_meta.get("EffectiveEchoSpacing") if ees: # Effective echo spacing means that acceleration factors have been accounted for. 
return ees * (npe - 1) try: echospacing = in_meta["EchoSpacing"] acc_factor = in_meta["ParallelReductionFactorInPlane"] except KeyError: pass else: # etl = effective train length etl = npe // acc_factor return echospacing * (etl - 1) # Use case 3 (Philips scans) try: wfs = in_meta["WaterFatShift"] epifactor = in_meta["EPIFactor"] except KeyError: pass else: wfs_hz = ( (in_meta.get("ImagingFrequency", 0) * 3.39941) or (in_meta.get("MagneticFieldStrength", 0) * 144.7383333) or None ) if wfs_hz: ees = wfs / (wfs_hz * (epifactor + 1)) return ees * (npe - 1) raise ValueError("Unknown total-readout time specification") def epi_mask(in_file, out_file=None): """Use grayscale morphological operations to obtain a quick mask of EPI data.""" from pathlib import Path import nibabel as nb import numpy as np from scipy import ndimage from skimage.morphology import ball if out_file is None: out_file = Path("mask.nii.gz").absolute() img = nb.load(in_file) data = img.get_fdata(dtype="float32") # First open to blur out the skull around the brain opened = ndimage.grey_opening(data, structure=ball(3)) # Second, close large vessels and the ventricles closed = ndimage.grey_closing(opened, structure=ball(2)) # Window filter on percentile 30 closed -= np.percentile(closed, 30) # Window filter on percentile 90 of data maxnorm = np.percentile(closed[closed > 0], 90) closed = np.clip(closed, a_min=0.0, a_max=maxnorm) # Calculate index of center of masses cm = tuple(np.round(ndimage.measurements.center_of_mass(closed)).astype(int)) # Erode the picture of the brain by a lot eroded = ndimage.grey_erosion(closed, structure=ball(5)) # Calculate the residual wshed = opened - eroded wshed -= wshed.min() wshed = np.round(1e3 * wshed / wshed.max()).astype(np.uint16) markers = np.zeros_like(wshed, dtype=int) markers[cm] = 2 markers[0, 0, -1] = -1 # Run watershed labels = ndimage.watershed_ift(wshed, markers) hdr = img.header.copy() hdr.set_data_dtype("uint8") nb.Nifti1Image( ndimage.binary_dilation(labels == 2, ball(2)).astype("uint8"), img.affine, hdr ).to_filename(out_file) return out_file
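
# ---------------------------------------------------------------------------
# Editor's note: a minimal worked example of the Philips branch above, kept
# separate from the module code. It redoes the arithmetic of Eqs. (1)-(2) by
# hand with the doctest values; npe = 90 is an assumption taken from the
# 90x90x60 test image built in the testsetup block.
# ---------------------------------------------------------------------------
wfs = 9.2227266           # 'WaterFatShift' (pixels)
epi_factor = 35           # Philips 'EPIFactor'
imaging_freq = 127.7325   # 'ImagingFrequency' (MHz)
npe = 90                  # voxels along the phase-encoding axis

wfs_hz = imaging_freq * 3.39941          # water/fat shift in Hz (~434.215 at 3T)
ees = wfs / (wfs_hz * (epi_factor + 1))  # effective echo spacing, Eq. (2)
trt = ees * (npe - 1)                    # total readout time, Eq. (1)
assert f"{trt:0.5f}" == "0.05251"        # matches the doctests above

# The quick-mask helper can be exercised on the same test image; the output
# defaults to mask.nii.gz in the working directory:
#   epi_mask('epi.nii.gz')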
from __future__ import annotations

from enum import Enum
from re import match
from typing import Optional
from typing import Dict
from typing import List
from pathlib import Path

from jmm.utilities.functions import get_number


class Direction(Enum):
    FORWARD = 1
    BACKWARD = -1


class SubtitleType(Enum):
    NONE = 0
    EMBEDDING = 1
    EXTERNEL = 2
    MIXING = 3


class Subtitle:
    def __init__(self, subtitle_type: SubtitleType, file_paths: Optional[List[Path]] = None):
        self.subtitle_type = subtitle_type
        self.file_paths = file_paths or []

    def __bool__(self) -> bool:
        return not self.subtitle_type == SubtitleType.NONE


class FileInformation:
    _instances: Dict[Path, FileInformation] = {}

    def __new__(cls, file_path: Path) -> FileInformation:
        absolute_path = file_path.absolute()
        if absolute_path not in cls._instances:
            cls._instances[absolute_path] = super().__new__(cls)
        return cls._instances[absolute_path]

    def __init__(self, file_path: Path):
        self.file_path = file_path.absolute()

    @property
    def previous(self) -> Optional[FileInformation]:
        """
        Find this file's preceding neighbor.
        """
        return self._get_neighbor(direction=Direction.BACKWARD)

    @property
    def next(self) -> Optional[FileInformation]:
        """
        Find this file's following neighbor.
        """
        return self._get_neighbor(direction=Direction.FORWARD)

    @property
    def number(self) -> Optional[str]:
        """
        Get the ID number contained in the file name.
        """
        return get_number(self.file_path.name)

    @property
    def root(self) -> FileInformation:
        """
        Get the head node of the file chain.
        """
        point = self
        while point:
            if not point.previous:
                break
            point = point.previous
        return point

    @property
    def index(self) -> Optional[int]:
        """
        Find this file's absolute position among all of its neighbors.

        For example, given a group of files:

        - ``xxx-250-cd1.mp4``,
        - ``xxx-250-cd2.mp4``,
        - ``xxx-250-cd3.mp4``.

        Then:

        - ``FileInformation('xxx-250-cd1.mp4').index`` is ``0``,
        - ``FileInformation('xxx-250-cd2.mp4').index`` is ``1``,
        - ``FileInformation('xxx-250-cd3.mp4').index`` is ``2``.
        """
        _index = 0
        point = self.previous
        while point:
            _index += 1
            point = point.previous
        return _index

    def _get_neighbor(self, direction: Direction) -> Optional[FileInformation]:
        """
        Get the file's neighbor.

        Some movies are split into several files, distinguished by suffixes
        such as ``-CD1`` and ``-CD2``. These files belong to the same movie and
        are neighbors of one another. Neighbors are ordered by the ascending
        string order of their paths. If no neighbor can be found,
        :py:obj:`None` is returned.

        :param direction: if the value is :py:obj:`Direction.FORWARD`, search
            forwards for the nearest neighbor; if the value is
            :py:obj:`Direction.BACKWARD`, search backwards for the nearest
            neighbor.
        """
        result = match(pattern=r'(?P<prefix>.*[-_][cC][dD])(?P<index>\d+)(?P<suffix>.*)', string=self.file_path.name)
        if not result:
            return None
        group = result.groupdict()
        next_file_path = self.file_path.with_name(f'{group["prefix"]}{int(group["index"]) + direction.value}{group["suffix"]}')
        if not next_file_path.is_file():
            return None
        return FileInformation(next_file_path)

    @property
    def subtitle(self) -> Subtitle:
        """
        Get the subtitle associated with the file.
        """
        has_embedding_subtitle = self.file_path.stem.lower().endswith('-c') or self.file_path.stem.lower().endswith('_c')
        subtitle_paths = [file_path for file_path in map(self.file_path.with_suffix, ['.ass', '.srt']) if file_path.is_file()]
        if subtitle_paths and has_embedding_subtitle:
            return Subtitle(SubtitleType.MIXING, subtitle_paths)
        if subtitle_paths and (not has_embedding_subtitle):
            return Subtitle(SubtitleType.EXTERNEL, subtitle_paths)
        if (not subtitle_paths) and has_embedding_subtitle:
            return Subtitle(SubtitleType.EMBEDDING, subtitle_paths)
        return Subtitle(SubtitleType.NONE, subtitle_paths)

    def __repr__(self) -> str:
        return f'<file {str(self.file_path)}, {self.number}, {"with" if self.subtitle else "without"} subtitle>'  # pragma: no cover

    def __hash__(self) -> int:
        return id(self.root)

    def __eq__(self, other: object) -> bool:
        """
        Determine whether two objects are equal.
        """
        if not isinstance(other, FileInformation):
            return NotImplemented
        return self.root is other.root
# Contains functions and helpers to obtain and interact with REDCap API data.

import json
import requests

################################################################
#### Metadata behavior
################################################################

def _request_metadata(secrets_dict: dict) -> str:
    '''Makes a REDCap API call for a REDCap project's metadata.
    Returns the text of the API response.
    '''
    metadata_request = {
        'token': secrets_dict['api_key'],
        'content': 'metadata',
        'format': 'json',
    }
    r = requests.post(secrets_dict['url'], data=metadata_request)
    #print('>>> Metadata request HTTP Status: ' + str(r.status_code))
    return r.text

def get_metadata(secrets_dict: dict) -> list[dict]:
    '''Returns a list of dictionaries that contain metadata
    for a REDCap project's fields.
    '''
    raw_metadata_string = _request_metadata(secrets_dict)
    md = json.loads(raw_metadata_string)
    if isinstance(md, dict) and md.get('error'):
        print(f"REDCap API returned an error while fetching metadata: {md['error']}")
        exit(1)
    return md

def _extract_field(md: list[dict], which_field: str) -> list[str]:
    '''Parses REDCap metadata and returns a list of variable names
    of the desired field type.
    Originally intended for use with radio buttons (which_field='radio')
    and checkboxes (which_field='checkbox').
    '''
    result = []
    for field in md:
        if field['field_type'] == which_field:
            result.append(field['field_name'])
    return result

def get_radio_buttons_checkboxes(md: list[dict]) -> tuple[list[str], list[str]]:
    '''Returns a 2-tuple of lists: the first a list of radio button fields,
    and the second a list of checkbox fields
    (as defined in REDCap metadata dictionary md).
    '''
    return (_extract_field(md, 'radio'), _extract_field(md, 'checkbox'))

def get_fields_and_types(md: list[dict]) -> dict[str, str]:
    '''Returns a dictionary mapping REDCap field names
    to their REDCap-defined types.
    '''
    result = dict()
    for field in md:
        result[field['field_name']] = field['field_type']
    return result

def get_multiple_choice_text(md: list[dict]) -> dict[str, dict]:
    '''Returns a dictionary mapping multiple-choice REDCap variable names
    to a dict of options mapping each option's raw value to its display text.

    Example: REDCap radio button named 'radio_buttons_1' with values:
        1, Option A
        2, Option B
    This function will create a dict like so:
        texts = {'radio_buttons_1': {'1': 'Option A', '2': 'Option B'}}
        texts['radio_buttons_1']['1'] == 'Option A' # True
    '''
    texts = dict()
    for field in md:
        # First verify that the field is a multiple-choice field
        if isinstance(field, dict) and field.get('select_choices_or_calculations'):
            # REDCap API returns multiple choice options in the format
            # "{raw_value}, {display_text} | {raw_value}, {display_text} | ... "
            # Create the dict that maps raw_value to display_text:
            sub_dict = dict()
            choices = field['select_choices_or_calculations'].split(' | ')
            # Sometimes REDCap skips the spaces around the vertical bar '|' separating choices....
            if len(choices) == 1:
                choices = field['select_choices_or_calculations'].split('|')
            for option in choices:
                option_fragments = option.strip().split(', ')
                # option_fragments[0] is raw_value, everything else is display_text (which could have a ', ' in it)
                sub_dict[option_fragments[0]] = ', '.join(option_fragments[1:])
            texts[field['field_name']] = sub_dict
    return texts

################################################################
#### Records behavior
################################################################

def _request_record(secrets_dict: dict, redcap_unique_identifier: str, record_id) -> str:
    '''Makes a REDCap API call for a single record from a REDCap project.
    Returns the text of the API response.
    '''
    record_request = {
        'token': secrets_dict['api_key'],
        'content': "record",
        'format': "json",
        'type': "flat",
        'filterLogic': f"[{redcap_unique_identifier}] = '{record_id}'"
    }
    r = requests.post(secrets_dict['url'], data=record_request)
    #print('>>> Record request HTTP Status: ' + str(r.status_code))
    return r.text

def get_record(secrets_dict: dict, redcap_unique_identifier: str, record_id: str) -> dict:
    '''Returns a dictionary that contains data of a single REDCap record,
    identified by the value of record_id in redcap_unique_identifier.
    '''
    raw_record_data = _request_record(secrets_dict, redcap_unique_identifier, record_id)
    record = json.loads(raw_record_data)
    if isinstance(record, dict) and record.get('error'):
        print(f"REDCap API returned an error while fetching record {record_id}: {record['error']}")
        exit(1)
    if isinstance(record, list):
        if len(record) < 1:
            # REDCap API returns '[]' if no results are returned from
            # filterLogic, despite a 200 OK HTTP code.
            # filterLogic generates a *list* of records matching that logic.
            # If filterLogic is incorrect or too strict, there's nothing to
            # return, and that generated list is empty.
            raise LookupError(f"No records found where '{redcap_unique_identifier}' = {record_id}")
        elif len(record) > 1:
            raise LookupError(f"Multiple records found where '{redcap_unique_identifier}' = {record_id} (expected only 1)")
        return record[0]
    return record
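
# ---------------------------------------------------------------------------
# Editor's usage sketch for the helpers above (assumed to be in scope). The
# secrets file layout follows the keys the module reads ('api_key' and 'url');
# the identifier field 'record_id' and the value '1001' are illustrative
# placeholders only.
# ---------------------------------------------------------------------------
import json

with open('secrets.json') as fh:
    secrets = json.load(fh)   # expects {'api_key': '...', 'url': '...'}

md = get_metadata(secrets)
radios, checkboxes = get_radio_buttons_checkboxes(md)
choice_text = get_multiple_choice_text(md)

record = get_record(secrets, 'record_id', '1001')
for field in radios:
    raw_value = record.get(field, '')
    if raw_value:
        # Translate the stored raw value into its human-readable label
        print(field, '->', choice_text[field][raw_value])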
#!/usr/bin/env python3 """ eGenix Antispam Bot for Telegram Challenges Written by Marc-Andre Lemburg in 2022. Copyright (c) 2022, eGenix.com Software GmbH; mailto:info@egenix.com License: MIT """ import random import re from telegram_antispam_bot.config import ( CHALLENGE_CHARS, CHALLENGE_LENGTH, DEBUG, ) ### Globals # Debug level _debug = DEBUG ### Challenge class class Challenge: """ Challenge for the user to answer correctly. The base implementation simply creates a short text snippet, which the user will have to enter. Case does not matter. Other implementations are possible, provided the API is kept compatible. """ # Client using this Challenge instance client = None # Characters to use for challenge strings challenge_chars = CHALLENGE_CHARS # Length of challenge strings challenge_length = CHALLENGE_LENGTH # Expected answer as regular expression answer = '' def __init__(self, client, message): """ Create a challenge instance. client has to point to the AntispamBot instance. message needs to point to the member's signup message and can be used for creating the challenge. Note: message should not be stored in the instance to avoid creating circular references. """ self.client = client def create_challenge(self, message): """ Create a challenge text to send to the user and the expected answer. Returns (challenge, answer). """ answer = ''.join( random.choices( self.challenge_chars, k=self.challenge_length)).upper() return ( f'`{answer}`', # we format this verbatim to make easier to spot f'(?i)^{answer}$' # case is not important for the answer ) async def send(self, message): """ Send a message to the new user, asking to answer a challenge. message needs to point to the member's signup message. client has to point to the AntispamBot instance. """ # Create challenge text and answer challenge, self.answer = self.create_challenge(message) # Send challenge string message.conversation.append( await self.client.send_message( message.chat.id, f'Welcome to the chat, {message.new_member.first_name} ! ' f'Please enter {challenge} into this chat ' f'to get approved as a member ' f'(within the next few seconds).', reply_to_message_id=message.message_id)) def check(self, answer): """ Check the user's answer to the challenge and return True/False depending on whether it matches or not. answer needs to point to the user's answer message. """ # Entered value value = answer.text.strip() # Check against snippet if _debug: self.client.log(f'Checking entered value {value!r} against {self.answer}') if re.match(self.answer, value) is not None: return True return False class UppercaseChallenge(Challenge): """ Enter all uppercase chars as challenge. """ def create_challenge(self, message): lower = random.choices( self.challenge_chars.lower(), k=self.challenge_length) upper = random.choices( re.sub('[^A-Z]+', '', self.challenge_chars.upper()), k=self.challenge_length) challenge = lower + upper random.shuffle(challenge) answer = [x for x in challenge if x.isupper()] return ( f'all uppercase characters found in `{"".join(challenge)}`', f'(?i)^{"".join(answer)}$' ) class ReverseStringChallenge(Challenge): """ Reverse a string as challenge. """ def create_challenge(self, message): challenge = random.choices( self.challenge_chars.upper(), k=self.challenge_length) answer = reversed(challenge) return ( f'the reversed version of `{"".join(challenge)}`', f'(?i)^{"".join(answer)}$' ) class MathAddChallenge(Challenge): """ Solve a math addition as challenge. 
""" def create_challenge(self, message): a = random.randint(1, 100) b = random.randint(1, 100) return ( f'the result of `{a} + {b}`', f'^{str(a + b)}$' ) class ListItemChallenge(Challenge): """ Figure out Python list indexing as challenge. """ def create_challenge(self, message): l = random.sample(range(10), k=6) i = random.randint(0, 5) return ( f'the result of the Python expression `{l!r}[{i}]`', f'^{str(l[i])}$' )
import rebase.util.api_request as api_request
import json
import dill

import rebase as rb


def create(site_id, model):
    """Create a new model for the specified site

    Args:
        site_id (str): the id of the site
        model (class): the model class to create

    Returns:
        str: -

    Example::

        class MyModel(rb.Model):
            # your code
            ...

        site_id = '4ab82692-3944-4069-9cbb-f9c59513c1c3'
        rb.create(site_id, MyModel)

    """
    data = dill.dumps(model, recurse=True)
    params = {'model_name': model.__name__}
    path = 'platform/v1/model/custom/create/{}'.format(site_id)
    r = api_request.post(path, params=params, data=data)
    if r.status_code != 200:
        raise Exception(f"Error creating model for site {site_id}: {r.content.decode('utf-8')}")
    return r.json()


def update(model_id, model):
    """Update an existing model for a specified site

    Args:
        model_id (str): the id of the model to update
        model (class): the new model class

    Returns:
        str: -

    Example::

        class MyNewModel(rb.Model):
            # your code
            ...

        model_id = '4ab82692-3944-4069-9cbb-f9c59513c1c3'
        rb.update(model_id, MyNewModel)

    """
    data = dill.dumps(model, recurse=True)
    params = {'model_name': model.__name__}
    path = 'platform/v1/model/custom/update/{}'.format(model_id)
    r = api_request.post(path, params=params, data=data)
    if r.status_code == 200:
        print('Ok, updated model {}'.format(model_id))
    else:
        raise Exception('Failed updating model {}'.format(model_id))


def train(model_id, start_date, end_date):
    """Trigger the training for a specific model

    Args:
        model_id (str): the id of the model
        start_date (datetime): the start date of training period
        end_date (datetime): the end date of training period

    Returns:
        str: -

    Example::

        from datetime import datetime

        model_id = 'd9ed55d2-4c7f-4486-a55d-fba8cb2c8791'
        start_date = datetime(2020, 2, 3, 0, 0)
        end_date = datetime(2021, 1, 4, 0, 0)
        rb.train(model_id, start_date, end_date)

    """
    path = 'platform/v1/model/train/{}'.format(model_id)
    data = {
        'start_date': start_date,
        'end_date': end_date
    }
    # datetime objects are not JSON-serializable by default; fall back to str()
    r = api_request.post(path, data=json.dumps(data, default=str))
    if r.status_code != 200:
        raise Exception(f"Error starting train for model {model_id}: {r.content.decode('utf-8')}")
    return r.content.decode('utf-8')


def hyperparam_search(model_id, params=None, hyperparams=None, n_trials=10, compute_params=None):
    # Copy the caller's params instead of mutating a shared default dict
    params = dict(params or {})
    path = 'platform/v1/model/hyperparam_search/{}'.format(model_id)
    params['model_id'] = model_id
    params['api_key'] = rb.api_key
    data = {
        'params': params,
        'hyperparams': hyperparams or {},
        'n_trials': n_trials,
        'compute_params': compute_params or {}
    }
    r = api_request.post(path, data=json.dumps(data))
    if r.status_code != 200:
        raise Exception(f"Error starting hyperparam_search for model {model_id}: {r.content.decode('utf-8')}")
    return r.json()


def report_result(model_id, job_name=None, params=None, score=None, exception=None):
    # Copy the caller's params instead of mutating a shared default dict
    params = dict(params or {})
    path = 'platform/v1/model/hyperparam_result/{}'.format(model_id)
    params['model_id'] = model_id
    params['api_key'] = rb.api_key
    data = {
        'job_name': job_name,
        'params': params,
        'score': score,
        'exception': exception
    }
    r = api_request.post(path, data=json.dumps(data))
    if r.status_code != 200:
        raise Exception(f"Error reporting hyperparam result for model {model_id}: {r.content.decode('utf-8')}")
    return r.json()


def hyperparam_results(model_id, task_key):
    r = api_request.get('platform/v1/model/hyperparam_allresults/{}'.format(model_id),
                        params={'api_key': rb.api_key, 'task_key': task_key})
    if r.status_code != 200:
        raise Exception(f"Error fetching hyperparam_results for model {model_id}: {r.content.decode('utf-8')}")
    return r.json()
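
# ---------------------------------------------------------------------------
# Editor's sketch of the intended call sequence, based on the docstrings
# above; rb.api_key must be configured beforehand. The site id and dates come
# from the documented examples; the 'id' field of the create() response and
# the hyperparameter search space are assumptions, not a documented contract.
# ---------------------------------------------------------------------------
from datetime import datetime

import rebase as rb


class MyModel(rb.Model):
    # your model code goes here
    ...


site_id = '4ab82692-3944-4069-9cbb-f9c59513c1c3'
response = create(site_id, MyModel)
model_id = response['id']  # hypothetical response field

train(model_id, datetime(2020, 2, 3, 0, 0), datetime(2021, 1, 4, 0, 0))
results = hyperparam_search(model_id, hyperparams={'lr': [1e-3, 1e-2]}, n_trials=5)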
"""Defines the linter class.""" import os import time import logging from typing import ( Any, Generator, List, Sequence, Optional, Tuple, Union, cast, Iterable, ) import pathspec from sqlfluff.core.errors import ( SQLBaseError, SQLLexError, SQLLintError, SQLParseError, SQLTemplaterSkipFile, ) from sqlfluff.core.parser import Lexer, Parser from sqlfluff.core.file_helpers import get_encoding from sqlfluff.core.templaters import TemplatedFile from sqlfluff.core.rules import get_ruleset from sqlfluff.core.config import FluffConfig, ConfigLoader # Classes needed only for type checking from sqlfluff.core.linter.runner import get_runner from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.parser.segments.meta import MetaSegment from sqlfluff.core.parser.segments.raw import RawSegment from sqlfluff.core.rules.base import BaseRule from sqlfluff.core.linter.common import ( RuleTuple, ParsedString, NoQaDirective, RenderedFile, ) from sqlfluff.core.linter.linted_file import LintedFile from sqlfluff.core.linter.linted_dir import LintedDir from sqlfluff.core.linter.linting_result import LintingResult WalkableType = Iterable[Tuple[str, Optional[List[str]], List[str]]] # Instantiate the linter logger linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") class Linter: """The interface class to interact with the linter.""" # Default to allowing process parallelism allow_process_parallelism = True def __init__( self, config: Optional[FluffConfig] = None, formatter: Any = None, dialect: Optional[str] = None, rules: Optional[Union[str, List[str]]] = None, user_rules: Optional[Union[str, List[str]]] = None, ) -> None: # Store the config object self.config = FluffConfig.from_kwargs( config=config, dialect=dialect, rules=rules ) # Get the dialect and templater self.dialect = self.config.get("dialect_obj") self.templater = self.config.get("templater_obj") # Store the formatter for output self.formatter = formatter # Store references to user rule classes self.user_rules = user_rules or [] def get_ruleset(self, config: Optional[FluffConfig] = None) -> List[BaseRule]: """Get hold of a set of rules.""" rs = get_ruleset() # Register any user rules for rule in self.user_rules: rs.register(rule) cfg = config or self.config return rs.get_rulelist(config=cfg) def rule_tuples(self) -> List[RuleTuple]: """A simple pass through to access the rule tuples of the rule set.""" rs = self.get_ruleset() return [RuleTuple(rule.code, rule.description) for rule in rs] # #### Static methods # These are the building blocks of the linting process. @staticmethod def _load_raw_file_and_config(fname, root_config): """Load a raw file and the associated config.""" file_config = root_config.make_child_from_path(fname) encoding = get_encoding(fname=fname, config=file_config) with open(fname, encoding=encoding, errors="backslashreplace") as target_file: raw_file = target_file.read() # Scan the raw file for config commands. file_config.process_raw_file_for_config(raw_file) # Return the raw file and config return raw_file, file_config, encoding @staticmethod def _lex_templated_file( templated_file: TemplatedFile, config: FluffConfig ) -> Tuple[Optional[Sequence[BaseSegment]], List[SQLLexError], FluffConfig]: """Lex a templated file. NOTE: This potentially mutates the config, so make sure to use the returned one. 
""" violations = [] linter_logger.info("LEXING RAW (%s)", templated_file.fname) # Get the lexer lexer = Lexer(config=config) # Lex the file and log any problems try: tokens, lex_vs = lexer.lex(templated_file) # We might just get the violations as a list violations += lex_vs linter_logger.info( "Lexed tokens: %s", [seg.raw for seg in tokens] if tokens else None ) except SQLLexError as err: linter_logger.info("LEXING FAILED! (%s): %s", templated_file.fname, err) violations.append(err) return None, violations, config if not tokens: # pragma: no cover TODO? return None, violations, config # Check that we've got sensible indentation from the lexer. # We might need to suppress if it's a complicated file. templating_blocks_indent = config.get("template_blocks_indent", "indentation") if isinstance(templating_blocks_indent, str): force_block_indent = templating_blocks_indent.lower().strip() == "force" else: force_block_indent = False templating_blocks_indent = bool(templating_blocks_indent) # If we're forcing it through we don't check. if templating_blocks_indent and not force_block_indent: indent_balance = sum( getattr(elem, "indent_val", 0) for elem in cast(Tuple[BaseSegment, ...], tokens) ) if indent_balance != 0: linter_logger.debug( "Indent balance test failed for %r. Template indents will not be linted for this file.", templated_file.fname, ) # Don't enable the templating blocks. templating_blocks_indent = False # Disable the linting of L003 on templated tokens. config.set_value(["rules", "L003", "lint_templated_tokens"], False) # The file will have been lexed without config, so check all indents # are enabled. new_tokens = [] for token in cast(Tuple[BaseSegment, ...], tokens): if token.is_meta: token = cast(MetaSegment, token) if token.indent_val != 0: # Don't allow it if we're not linting templating block indents. if not templating_blocks_indent: continue new_tokens.append(token) # Return new buffer return new_tokens, violations, config @staticmethod def _parse_tokens( tokens: Sequence[BaseSegment], config: FluffConfig, recurse: bool = True, fname: Optional[str] = None, ) -> Tuple[Optional[BaseSegment], List[SQLParseError]]: parser = Parser(config=config) violations = [] # Parse the file and log any problems try: parsed: Optional[BaseSegment] = parser.parse( tokens, recurse=recurse, fname=fname ) except SQLParseError as err: linter_logger.info("PARSING FAILED! : %s", err) violations.append(err) return None, violations if parsed: linter_logger.info("\n###\n#\n# {}\n#\n###".format("Parsed Tree:")) linter_logger.info("\n" + parsed.stringify()) # We may succeed parsing, but still have unparsable segments. Extract them here. for unparsable in parsed.iter_unparsables(): # No exception has been raised explicitly, but we still create one here # so that we can use the common interface violations.append( SQLParseError( "Line {0[0]}, Position {0[1]}: Found unparsable section: {1!r}".format( unparsable.pos_marker.working_loc, unparsable.raw if len(unparsable.raw) < 40 else unparsable.raw[:40] + "...", ), segment=unparsable, ) ) linter_logger.info("Found unparsable segment...") linter_logger.info(unparsable.stringify()) return parsed, violations @staticmethod def parse_noqa(comment: str, line_no: int): """Extract ignore mask entries from a comment string.""" # Also trim any whitespace afterward if comment.startswith("noqa"): # This is an ignore identifier comment_remainder = comment[4:] if comment_remainder: if not comment_remainder.startswith(":"): return SQLParseError( "Malformed 'noqa' section. 
Expected 'noqa: <rule>[,...]", line_no=line_no, ) comment_remainder = comment_remainder[1:].strip() if comment_remainder: action: Optional[str] if "=" in comment_remainder: action, rule_part = comment_remainder.split("=", 1) if action not in {"disable", "enable"}: # pragma: no cover return SQLParseError( "Malformed 'noqa' section. " "Expected 'noqa: enable=<rule>[,...] | all' " "or 'noqa: disable=<rule>[,...] | all", line_no=line_no, ) else: action = None rule_part = comment_remainder if rule_part in {"disable", "enable"}: return SQLParseError( "Malformed 'noqa' section. " "Expected 'noqa: enable=<rule>[,...] | all' " "or 'noqa: disable=<rule>[,...] | all", line_no=line_no, ) rules: Optional[Tuple[str, ...]] if rule_part != "all": rules = tuple(r.strip() for r in rule_part.split(",")) else: rules = None return NoQaDirective(line_no, rules, action) return NoQaDirective(line_no, None, None) return None @staticmethod def remove_templated_errors( linting_errors: List[SQLBaseError], ) -> List[SQLBaseError]: """Filter a list of lint errors, removing those which only occur in templated slices.""" # Filter out any linting errors in templated sections if relevant. result: List[SQLBaseError] = [] for e in linting_errors: if isinstance(e, SQLLintError): if ( # Is it in a literal section? e.segment.pos_marker.is_literal() # Is it a rule that is designed to work on templated sections? or e.rule.targets_templated ): result.append(e) else: # If it's another type, just keep it. (E.g. SQLParseError from # malformed "noqa" comment). result.append(e) return result @staticmethod def _warn_unfixable(code: str): linter_logger.warning( f"One fix for {code} not applied, it would re-cause the same error." ) # ### Class Methods # These compose the base static methods into useful recipes. 
    @classmethod
    def parse_rendered(cls, rendered: RenderedFile, recurse: bool = True):
        """Parse a rendered file."""
        t0 = time.monotonic()
        violations = cast(List[SQLBaseError], rendered.templater_violations)
        tokens: Optional[Sequence[BaseSegment]]
        if rendered.templated_file:
            tokens, lvs, config = cls._lex_templated_file(
                rendered.templated_file, rendered.config
            )
            violations += lvs
        else:
            tokens = None

        t1 = time.monotonic()
        linter_logger.info("PARSING (%s)", rendered.fname)
        if tokens:
            parsed, pvs = cls._parse_tokens(
                tokens, rendered.config, recurse=recurse, fname=rendered.fname
            )
            violations += pvs
        else:
            parsed = None

        time_dict = {
            **rendered.time_dict,
            "lexing": t1 - t0,
            "parsing": time.monotonic() - t1,
        }
        return ParsedString(
            parsed,
            violations,
            time_dict,
            rendered.templated_file,
            rendered.config,
            rendered.fname,
        )

    @classmethod
    def extract_ignore_from_comment(cls, comment: RawSegment):
        """Extract ignore mask entries from a comment segment."""
        # Also trim any whitespace afterward
        comment_content = comment.raw_trimmed().strip()
        comment_line, _ = comment.pos_marker.source_position()
        result = cls.parse_noqa(comment_content, comment_line)
        if isinstance(result, SQLParseError):
            result.segment = comment
        return result

    @classmethod
    def extract_ignore_mask(
        cls, tree: BaseSegment
    ) -> Tuple[List[NoQaDirective], List[SQLBaseError]]:
        """Look for inline ignore comments and return NoQaDirectives."""
        ignore_buff: List[NoQaDirective] = []
        violations: List[SQLBaseError] = []
        for comment in tree.recursive_crawl("comment"):
            if comment.name == "inline_comment":
                ignore_entry = cls.extract_ignore_from_comment(comment)
                if isinstance(ignore_entry, SQLParseError):
                    violations.append(ignore_entry)
                elif ignore_entry:
                    ignore_buff.append(ignore_entry)
        if ignore_buff:
            linter_logger.info("Parsed noqa directives from file: %r", ignore_buff)
        return ignore_buff, violations

    @classmethod
    def lint_fix_parsed(
        cls,
        tree: BaseSegment,
        config: FluffConfig,
        rule_set: List[BaseRule],
        fix: bool = False,
        fname: Optional[str] = None,
        templated_file: Optional[TemplatedFile] = None,
        formatter: Any = None,
    ) -> Tuple[BaseSegment, List[SQLBaseError], List[NoQaDirective]]:
        """Lint and optionally fix a tree object."""
        # Keep track of the linting errors
        all_linting_errors = []
        # A placeholder for the fixes we had on the previous loop
        last_fixes = None
        # Keep a set of previous versions to catch infinite loops.
        previous_versions = {tree.raw}

        # If we are fixing then we want to loop up to the runaway_limit,
        # otherwise just once for linting.
        loop_limit = config.get("runaway_limit") if fix else 1

        # Dispatch the output for the lint header
        if formatter:
            formatter.dispatch_lint_header(fname)

        # Look for comment segments which might indicate lines to ignore.
        ignore_buff, ivs = cls.extract_ignore_mask(tree)
        all_linting_errors += ivs

        for loop in range(loop_limit):
            changed = False
            for crawler in rule_set:
                # fixes should be a dict {} with keys edit, delete, create
                # delete is just a list of segments to delete
                # edit and create are lists of tuples. The first element is the
                # "anchor", the segment to look for either to edit or to insert BEFORE.
                # The second is the element to insert or create.
                linting_errors, _, fixes, _ = crawler.crawl(
                    tree,
                    ignore_mask=ignore_buff,
                    dialect=config.get("dialect_obj"),
                    fname=fname,
                    templated_file=templated_file,
                )
                all_linting_errors += linting_errors

                if fix and fixes:
                    linter_logger.info(f"Applying Fixes [{crawler.code}]: {fixes}")
                    # Do some sanity checks on the fixes before applying.
                    if fixes == last_fixes:  # pragma: no cover
                        cls._warn_unfixable(crawler.code)
                    else:
                        last_fixes = fixes
                        new_tree, _ = tree.apply_fixes(fixes)
                        # Check for infinite loops
                        if new_tree.raw not in previous_versions:
                            # We've not seen this version of the file so far. Continue.
                            tree = new_tree
                            previous_versions.add(tree.raw)
                            changed = True
                            continue
                        else:
                            # Applying these fixes took us back to a state which we've
                            # seen before. Abort.
                            cls._warn_unfixable(crawler.code)

            if loop == 0:
                # Keep track of initial errors for reporting.
                initial_linting_errors = all_linting_errors.copy()

            if fix and not changed:
                # We did not change the file. Either the file is clean (no fixes), or
                # any fixes which are present will take us back to a previous state.
                linter_logger.info(
                    f"Fix loop complete. Stability achieved after {loop}/{loop_limit} loops."
                )
                break
        if fix and loop + 1 == loop_limit:
            linter_logger.warning(f"Loop limit on fixes reached [{loop_limit}].")

        if config.get("ignore_templated_areas", default=True):
            initial_linting_errors = cls.remove_templated_errors(initial_linting_errors)

        return tree, initial_linting_errors, ignore_buff

    @classmethod
    def lint_parsed(
        cls,
        parsed: ParsedString,
        rule_set: List[BaseRule],
        fix: bool = False,
        formatter: Any = None,
        encoding: str = "utf8",
    ):
        """Lint a ParsedString and return a LintedFile."""
        violations = parsed.violations
        time_dict = parsed.time_dict
        tree: Optional[BaseSegment]
        if parsed.tree:
            t0 = time.monotonic()
            linter_logger.info("LINTING (%s)", parsed.fname)
            tree, initial_linting_errors, ignore_buff = cls.lint_fix_parsed(
                parsed.tree,
                config=parsed.config,
                rule_set=rule_set,
                fix=fix,
                fname=parsed.fname,
                templated_file=parsed.templated_file,
                formatter=formatter,
            )
            # Update the timing dict
            time_dict["linting"] = time.monotonic() - t0
            # We're only going to return the *initial* errors, rather
            # than any generated during the fixing cycle.
            violations += initial_linting_errors
        else:
            # If no parsed tree, set to None
            tree = None
            ignore_buff = []

        # We process the ignore config here if appropriate
        for violation in violations:
            violation.ignore_if_in(parsed.config.get("ignore"))

        linted_file = LintedFile(
            parsed.fname,
            violations,
            time_dict,
            tree,
            ignore_mask=ignore_buff,
            templated_file=parsed.templated_file,
            encoding=encoding,
        )

        # This is the main command line output from linting.
        if formatter:
            formatter.dispatch_file_violations(
                parsed.fname, linted_file, only_fixable=fix
            )

        # Safety flag for unset dialects
        if parsed.config.get("dialect") == "ansi" and linted_file.get_violations(
            fixable=True if fix else None, types=SQLParseError
        ):
            if formatter:  # pragma: no cover TODO?
                formatter.dispatch_dialect_warning()

        return linted_file

    @classmethod
    def lint_rendered(
        cls,
        rendered: RenderedFile,
        rule_set: List[BaseRule],
        fix: bool = False,
        formatter: Any = None,
    ) -> LintedFile:
        """Take a RenderedFile and return a LintedFile."""
        parsed = cls.parse_rendered(rendered)
        return cls.lint_parsed(
            parsed,
            rule_set=rule_set,
            fix=fix,
            formatter=formatter,
            encoding=rendered.encoding,
        )

    # ### Instance Methods
    # These are tied to a specific instance and so are not necessarily
    # safe to use in parallel operations.

    def render_string(
        self, in_str: str, fname: str, config: FluffConfig, encoding: str
    ) -> RenderedFile:
        """Template the file."""
        linter_logger.info("TEMPLATING RAW [%s] (%s)", self.templater.name, fname)

        # Start the templating timer
        t0 = time.monotonic()

        if config.get("templater_obj") != self.templater:
            linter_logger.warning(
                (
                    f"Attempt to set templater to {config.get('templater_obj').name} "
                    f"failed. Using {self.templater.name} "
                    "templater. Templater cannot be set in a .sqlfluff file in a "
                    "subdirectory of the current working "
                    "directory. It can be set in a .sqlfluff in the current working "
                    "directory. See Nesting section of the "
                    "docs for more details."
                )
            )
        try:
            templated_file, templater_violations = self.templater.process(
                in_str=in_str, fname=fname, config=config, formatter=self.formatter
            )
        except SQLTemplaterSkipFile as s:  # pragma: no cover
            linter_logger.warning(str(s))
            templated_file = None
            templater_violations = []

        if not templated_file:
            linter_logger.info("TEMPLATING FAILED: %s", templater_violations)

        # Record time
        time_dict = {"templating": time.monotonic() - t0}

        return RenderedFile(
            templated_file, templater_violations, config, time_dict, fname, encoding
        )

    def render_file(self, fname: str, root_config: FluffConfig) -> RenderedFile:
        """Load and render a file with relevant config."""
        # Load the raw file.
        raw_file, config, encoding = self._load_raw_file_and_config(fname, root_config)
        # Render the file
        return self.render_string(raw_file, fname, config, encoding)

    def parse_string(
        self,
        in_str: str,
        fname: str = "<string>",
        recurse: bool = True,
        config: Optional[FluffConfig] = None,
        encoding: str = "utf-8",
    ) -> ParsedString:
        """Parse a string."""
        violations: List[SQLBaseError] = []

        # Dispatch the output for the template header (including the config diff)
        if self.formatter:
            self.formatter.dispatch_template_header(fname, self.config, config)

        # Just use the local config from here:
        config = config or self.config

        # Scan the raw file for config commands.
        config.process_raw_file_for_config(in_str)
        rendered = self.render_string(in_str, fname, config, encoding)
        violations += rendered.templater_violations

        # Dispatch the output for the parse header
        if self.formatter:
            self.formatter.dispatch_parse_header(fname)

        return self.parse_rendered(rendered, recurse=recurse)

    def fix(
        self,
        tree: BaseSegment,
        config: Optional[FluffConfig] = None,
        fname: Optional[str] = None,
        templated_file: Optional[TemplatedFile] = None,
    ) -> Tuple[BaseSegment, List[SQLBaseError]]:
        """Return the fixed tree and violations from lintfix when we're fixing."""
        config = config or self.config
        rule_set = self.get_ruleset(config=config)
        fixed_tree, violations, _ = self.lint_fix_parsed(
            tree,
            config,
            rule_set,
            fix=True,
            fname=fname,
            templated_file=templated_file,
            formatter=self.formatter,
        )
        return fixed_tree, violations

    def lint(
        self,
        tree: BaseSegment,
        config: Optional[FluffConfig] = None,
        fname: Optional[str] = None,
        templated_file: Optional[TemplatedFile] = None,
    ) -> List[SQLBaseError]:
        """Return just the violations from lintfix when we're only linting."""
        config = config or self.config
        rule_set = self.get_ruleset(config=config)
        _, violations, _ = self.lint_fix_parsed(
            tree,
            config,
            rule_set,
            fix=False,
            fname=fname,
            templated_file=templated_file,
            formatter=self.formatter,
        )
        return violations

    def lint_string(
        self,
        in_str: str = "",
        fname: str = "<string input>",
        fix: bool = False,
        config: Optional[FluffConfig] = None,
        encoding: str = "utf8",
    ) -> LintedFile:
        """Lint a string.

        Returns:
            :obj:`LintedFile`: an object representing that linted file.

        """
        # Sort out config, defaulting to the built in config if no override
        config = config or self.config
        # Parse the string.
        parsed = self.parse_string(in_str=in_str, fname=fname, config=config)
        # Get rules as appropriate
        rule_set = self.get_ruleset(config=config)
        # Lint the file and return the LintedFile
        return self.lint_parsed(
            parsed, rule_set, fix=fix, formatter=self.formatter, encoding=encoding
        )

    def paths_from_path(
        self,
        path: str,
        ignore_file_name: str = ".sqlfluffignore",
        ignore_non_existent_files: bool = False,
        ignore_files: bool = True,
        working_path: str = os.getcwd(),
    ) -> List[str]:
        """Return a list of sql file paths from a potentially more ambiguous path string.

        Here we also deal with the .sqlfluffignore file if present.

        When a path to a file to be linted is explicitly passed
        we look for ignore files in all directories that are parents of the file,
        up to the current directory.

        If the current directory is not a parent of the file we only
        look for an ignore file in the direct parent of the file.

        """
        if not os.path.exists(path):
            if ignore_non_existent_files:
                return []
            else:
                raise OSError("Specified path does not exist")

        # Files referred to exactly are also ignored if
        # matched, but we warn the user when that happens
        is_exact_file = os.path.isfile(path)

        if is_exact_file:
            # When the exact file to lint is passed, we
            # fill path_walk with an input that follows
            # the structure of `os.walk`:
            #   (root, directories, files)
            dirpath = os.path.dirname(path)
            files = [os.path.basename(path)]
            ignore_file_paths = ConfigLoader.find_ignore_config_files(
                path=path, working_path=working_path, ignore_file_name=ignore_file_name
            )
            # Add paths that could contain "ignore files"
            # to the path_walk list
            path_walk_ignore_file = [
                (
                    os.path.dirname(ignore_file_path),
                    None,
                    # Only one possible file, since we only
                    # have one "ignore file name"
                    [os.path.basename(ignore_file_path)],
                )
                for ignore_file_path in ignore_file_paths
            ]
            path_walk: WalkableType = [(dirpath, None, files)] + path_walk_ignore_file
        else:
            path_walk = os.walk(path)

        # If it's a directory then expand the path!
        buffer = []
        ignore_set = set()
        for dirpath, _, filenames in path_walk:
            for fname in filenames:
                fpath = os.path.join(dirpath, fname)
                # Handle potential .sqlfluffignore files
                if ignore_files and fname == ignore_file_name:
                    with open(fpath) as fh:
                        spec = pathspec.PathSpec.from_lines("gitwildmatch", fh)
                    matches = spec.match_tree(dirpath)
                    for m in matches:
                        ignore_path = os.path.join(dirpath, m)
                        ignore_set.add(os.path.abspath(ignore_path))
                    # We don't need to process the ignore file any further
                    continue

                # We won't purge files *here* because there's an edge case
                # that the ignore file is processed after the sql file.

                # Scan for remaining files
                for ext in self.config.get("sql_file_exts", default=".sql").split(","):
                    # is it a sql file?
                    if fname.endswith(ext):
                        buffer.append(fpath)

        if not ignore_files:
            return sorted(buffer)

        # Check the buffer for ignore items and normalise the rest.
        filtered_buffer = []

        for fpath in buffer:
            if os.path.abspath(fpath) not in ignore_set:
                filtered_buffer.append(os.path.normpath(fpath))
            elif is_exact_file:
                linter_logger.warning(
                    "Exact file path %s was given but "
                    "it was ignored by a %s pattern, "
                    "re-run with `--disregard-sqlfluffignores` to "
                    "skip %s" % (path, ignore_file_name, ignore_file_name)
                )

        # Return
        return sorted(filtered_buffer)

    def lint_string_wrapped(
        self, string: str, fname: str = "<string input>", fix: bool = False
    ) -> LintingResult:
        """Lint strings directly."""
        result = LintingResult()
        linted_path = LintedDir(fname)
        linted_path.add(self.lint_string(string, fname=fname, fix=fix))
        result.add(linted_path)
        result.stop_timer()
        return result

    def lint_path(
        self,
        path: str,
        fix: bool = False,
        ignore_non_existent_files: bool = False,
        ignore_files: bool = True,
        processes: int = 1,
    ) -> LintedDir:
        """Lint a path."""
        linted_path = LintedDir(path)
        if self.formatter:
            self.formatter.dispatch_path(path)
        fnames = list(
            self.paths_from_path(
                path,
                ignore_non_existent_files=ignore_non_existent_files,
                ignore_files=ignore_files,
            )
        )
        runner = get_runner(
            self,
            self.config,
            processes=processes,
            allow_process_parallelism=self.allow_process_parallelism,
        )
        for linted_file in runner.run(fnames, fix):
            linted_path.add(linted_file)
            # If any fatal errors, then stop iteration.
            if any(v.fatal for v in linted_file.violations):  # pragma: no cover
                linter_logger.error("Fatal linting error. Halting further linting.")
                break
        return linted_path

    def lint_paths(
        self,
        paths: Tuple[str, ...],
        fix: bool = False,
        ignore_non_existent_files: bool = False,
        ignore_files: bool = True,
        processes: int = 1,
    ) -> LintingResult:
        """Lint an iterable of paths."""
        # If no paths specified - assume local
        if len(paths) == 0:  # pragma: no cover
            paths = (os.getcwd(),)
        # Set up the result to hold what we get back
        result = LintingResult()
        for path in paths:
            # Iterate through files recursively in the specified directory (if it's
            # a directory) or read the file directly if it's not
            result.add(
                self.lint_path(
                    path,
                    fix=fix,
                    ignore_non_existent_files=ignore_non_existent_files,
                    ignore_files=ignore_files,
                    processes=processes,
                )
            )
        result.stop_timer()
        return result

    def parse_path(
        self, path: str, recurse: bool = True
    ) -> Generator[ParsedString, None, None]:
        """Parse a path of sql files.

        NB: This is a generator which will yield the result of each file
        within the path iteratively.
        """
        for fname in self.paths_from_path(path):
            if self.formatter:
                self.formatter.dispatch_path(path)
            # Load the file with the config and yield the result.
            raw_file, config, encoding = self._load_raw_file_and_config(
                fname, self.config
            )
            yield self.parse_string(
                raw_file, fname=fname, recurse=recurse, config=config, encoding=encoding
            )
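# --- Usage sketch (editor's addition, not part of the module above). ---
# A minimal, hedged illustration of driving the Linter class defined above.
# It assumes the package exposes `Linter` at `sqlfluff.core` and relies only
# on methods shown above (`lint_string`, `LintedFile.get_violations`); exact
# import paths may vary between versions. The demo only runs when this file
# is executed directly.
if __name__ == "__main__":
    from sqlfluff.core import Linter as _Linter

    _linter = _Linter(dialect="ansi")
    # `lint_string` templates, parses and lints, returning a LintedFile.
    _linted = _linter.lint_string("SELECT a from tbl\n")
    for _violation in _linted.get_violations():
        print(_violation)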
"""Defines the linter class.""" import os import time import logging from typing import ( Any, Generator, List, Sequence, Optional, Tuple, Union, cast, Iterable, ) import pathspec from sqlfluff.core.errors import ( SQLBaseError, SQLLexError, SQLLintError, SQLParseError, SQLTemplaterSkipFile, ) from sqlfluff.core.parser import Lexer, Parser from sqlfluff.core.file_helpers import get_encoding from sqlfluff.core.templaters import TemplatedFile from sqlfluff.core.rules import get_ruleset from sqlfluff.core.config import FluffConfig, ConfigLoader # Classes needed only for type checking from sqlfluff.core.linter.runner import get_runner from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.parser.segments.meta import MetaSegment from sqlfluff.core.parser.segments.raw import RawSegment from sqlfluff.core.rules.base import BaseRule from sqlfluff.core.linter.common import ( RuleTuple, ParsedString, NoQaDirective, RenderedFile, ) from sqlfluff.core.linter.linted_file import LintedFile from sqlfluff.core.linter.linted_dir import LintedDir from sqlfluff.core.linter.linting_result import LintingResult WalkableType = Iterable[Tuple[str, Optional[List[str]], List[str]]] # Instantiate the linter logger linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") class Linter: """The interface class to interact with the linter.""" # Default to allowing process parallelism allow_process_parallelism = True def __init__( self, config: Optional[FluffConfig] = None, formatter: Any = None, dialect: Optional[str] = None, rules: Optional[Union[str, List[str]]] = None, user_rules: Optional[Union[str, List[str]]] = None, ) -> None: # Store the config object self.config = FluffConfig.from_kwargs( config=config, dialect=dialect, rules=rules ) # Get the dialect and templater self.dialect = self.config.get("dialect_obj") self.templater = self.config.get("templater_obj") # Store the formatter for output self.formatter = formatter # Store references to user rule classes self.user_rules = user_rules or [] def get_ruleset(self, config: Optional[FluffConfig] = None) -> List[BaseRule]: """Get hold of a set of rules.""" rs = get_ruleset() # Register any user rules for rule in self.user_rules: rs.register(rule) cfg = config or self.config return rs.get_rulelist(config=cfg) def rule_tuples(self) -> List[RuleTuple]: """A simple pass through to access the rule tuples of the rule set.""" rs = self.get_ruleset() return [RuleTuple(rule.code, rule.description) for rule in rs] # #### Static methods # These are the building blocks of the linting process. @staticmethod def _load_raw_file_and_config(fname, root_config): """Load a raw file and the associated config.""" file_config = root_config.make_child_from_path(fname) encoding = get_encoding(fname=fname, config=file_config) with open(fname, encoding=encoding, errors="backslashreplace") as target_file: raw_file = target_file.read() # Scan the raw file for config commands. file_config.process_raw_file_for_config(raw_file) # Return the raw file and config return raw_file, file_config, encoding @staticmethod def _lex_templated_file( templated_file: TemplatedFile, config: FluffConfig ) -> Tuple[Optional[Sequence[BaseSegment]], List[SQLLexError], FluffConfig]: """Lex a templated file. NOTE: This potentially mutates the config, so make sure to use the returned one. 
""" violations = [] linter_logger.info("LEXING RAW (%s)", templated_file.fname) # Get the lexer lexer = Lexer(config=config) # Lex the file and log any problems try: tokens, lex_vs = lexer.lex(templated_file) # We might just get the violations as a list violations += lex_vs linter_logger.info( "Lexed tokens: %s", [seg.raw for seg in tokens] if tokens else None ) except SQLLexError as err: linter_logger.info("LEXING FAILED! (%s): %s", templated_file.fname, err) violations.append(err) return None, violations, config if not tokens: # pragma: no cover TODO? return None, violations, config # Check that we've got sensible indentation from the lexer. # We might need to suppress if it's a complicated file. templating_blocks_indent = config.get("template_blocks_indent", "indentation") if isinstance(templating_blocks_indent, str): force_block_indent = templating_blocks_indent.lower().strip() == "force" else: force_block_indent = False templating_blocks_indent = bool(templating_blocks_indent) # If we're forcing it through we don't check. if templating_blocks_indent and not force_block_indent: indent_balance = sum( getattr(elem, "indent_val", 0) for elem in cast(Tuple[BaseSegment, ...], tokens) ) if indent_balance != 0: linter_logger.debug( "Indent balance test failed for %r. Template indents will not be linted for this file.", templated_file.fname, ) # Don't enable the templating blocks. templating_blocks_indent = False # Disable the linting of L003 on templated tokens. config.set_value(["rules", "L003", "lint_templated_tokens"], False) # The file will have been lexed without config, so check all indents # are enabled. new_tokens = [] for token in cast(Tuple[BaseSegment, ...], tokens): if token.is_meta: token = cast(MetaSegment, token) if token.indent_val != 0: # Don't allow it if we're not linting templating block indents. if not templating_blocks_indent: continue new_tokens.append(token) # Return new buffer return new_tokens, violations, config @staticmethod def _parse_tokens( tokens: Sequence[BaseSegment], config: FluffConfig, recurse: bool = True, fname: Optional[str] = None, ) -> Tuple[Optional[BaseSegment], List[SQLParseError]]: parser = Parser(config=config) violations = [] # Parse the file and log any problems try: parsed: Optional[BaseSegment] = parser.parse( tokens, recurse=recurse, fname=fname ) except SQLParseError as err: linter_logger.info("PARSING FAILED! : %s", err) violations.append(err) return None, violations if parsed: linter_logger.info("\n###\n#\n# {}\n#\n###".format("Parsed Tree:")) linter_logger.info("\n" + parsed.stringify()) # We may succeed parsing, but still have unparsable segments. Extract them here. for unparsable in parsed.iter_unparsables(): # No exception has been raised explicitly, but we still create one here # so that we can use the common interface violations.append( SQLParseError( "Line {0[0]}, Position {0[1]}: Found unparsable section: {1!r}".format( unparsable.pos_marker.working_loc, unparsable.raw if len(unparsable.raw) < 40 else unparsable.raw[:40] + "...", ), segment=unparsable, ) ) linter_logger.info("Found unparsable segment...") linter_logger.info(unparsable.stringify()) return parsed, violations @staticmethod def parse_noqa(comment: str, line_no: int): """Extract ignore mask entries from a comment string.""" # Also trim any whitespace afterward if comment.startswith("noqa"): # This is an ignore identifier comment_remainder = comment[4:] if comment_remainder: if not comment_remainder.startswith(":"): return SQLParseError( "Malformed 'noqa' section. 
Expected 'noqa: <rule>[,...]", line_no=line_no, ) comment_remainder = comment_remainder[1:].strip() if comment_remainder: action: Optional[str] if "=" in comment_remainder: action, rule_part = comment_remainder.split("=", 1) if action not in {"disable", "enable"}: # pragma: no cover return SQLParseError( "Malformed 'noqa' section. " "Expected 'noqa: enable=<rule>[,...] | all' " "or 'noqa: disable=<rule>[,...] | all", line_no=line_no, ) else: action = None rule_part = comment_remainder if rule_part in {"disable", "enable"}: return SQLParseError( "Malformed 'noqa' section. " "Expected 'noqa: enable=<rule>[,...] | all' " "or 'noqa: disable=<rule>[,...] | all", line_no=line_no, ) rules: Optional[Tuple[str, ...]] if rule_part != "all": rules = tuple(r.strip() for r in rule_part.split(",")) else: rules = None return NoQaDirective(line_no, rules, action) return NoQaDirective(line_no, None, None) return None @staticmethod def remove_templated_errors( linting_errors: List[SQLBaseError], ) -> List[SQLBaseError]: """Filter a list of lint errors, removing those which only occur in templated slices.""" # Filter out any linting errors in templated sections if relevant. result: List[SQLBaseError] = [] for e in linting_errors: if isinstance(e, SQLLintError): if ( # Is it in a literal section? e.segment.pos_marker.is_literal() # Is it a rule that is designed to work on templated sections? or e.rule.targets_templated ): result.append(e) else: # If it's another type, just keep it. (E.g. SQLParseError from # malformed "noqa" comment). result.append(e) return result @staticmethod def _warn_unfixable(code: str): linter_logger.warning( f"One fix for {code} not applied, it would re-cause the same error." ) # ### Class Methods # These compose the base static methods into useful recipes. 
@classmethod def parse_rendered(cls, rendered: RenderedFile, recurse: bool = True): """Parse a rendered file.""" t0 = time.monotonic() violations = cast(List[SQLBaseError], rendered.templater_violations) tokens: Optional[Sequence[BaseSegment]] if rendered.templated_file: tokens, lvs, config = cls._lex_templated_file( rendered.templated_file, rendered.config ) violations += lvs else: tokens = None t1 = time.monotonic() linter_logger.info("PARSING (%s)", rendered.fname) if tokens: parsed, pvs = cls._parse_tokens( tokens, rendered.config, recurse=recurse, fname=rendered.fname ) violations += pvs else: parsed = None time_dict = { **rendered.time_dict, "lexing": t1 - t0, "parsing": time.monotonic() - t1, } return ParsedString( parsed, violations, time_dict, rendered.templated_file, rendered.config, rendered.fname, ) @classmethod def extract_ignore_from_comment(cls, comment: RawSegment): """Extract ignore mask entries from a comment segment.""" # Also trim any whitespace afterward comment_content = comment.raw_trimmed().strip() comment_line, _ = comment.pos_marker.source_position() result = cls.parse_noqa(comment_content, comment_line) if isinstance(result, SQLParseError): result.segment = comment return result @classmethod def extract_ignore_mask( cls, tree: BaseSegment ) -> Tuple[List[NoQaDirective], List[SQLBaseError]]: """Look for inline ignore comments and return NoQaDirectives.""" ignore_buff: List[NoQaDirective] = [] violations: List[SQLBaseError] = [] for comment in tree.recursive_crawl("comment"): if comment.name == "inline_comment": ignore_entry = cls.extract_ignore_from_comment(comment) if isinstance(ignore_entry, SQLParseError): violations.append(ignore_entry) elif ignore_entry: ignore_buff.append(ignore_entry) if ignore_buff: linter_logger.info("Parsed noqa directives from file: %r", ignore_buff) return ignore_buff, violations @classmethod def lint_fix_parsed( cls, tree: BaseSegment, config: FluffConfig, rule_set: List[BaseRule], fix: bool = False, fname: Optional[str] = None, templated_file: Optional[TemplatedFile] = None, formatter: Any = None, ) -> Tuple[BaseSegment, List[SQLBaseError], List[NoQaDirective]]: """Lint and optionally fix a tree object.""" # Keep track of the linting errors all_linting_errors = [] # A placeholder for the fixes we had on the previous loop last_fixes = None # Keep a set of previous versions to catch infinite loops. previous_versions = {tree.raw} # If we are fixing then we want to loop up to the runaway_limit, otherwise just once for linting. loop_limit = config.get("runaway_limit") if fix else 1 # Dispatch the output for the lint header if formatter: formatter.dispatch_lint_header(fname) # Look for comment segments which might indicate lines to ignore. ignore_buff, ivs = cls.extract_ignore_mask(tree) all_linting_errors += ivs for loop in range(loop_limit): changed = False for crawler in rule_set: # fixes should be a dict {} with keys edit, delete, create # delete is just a list of segments to delete # edit and create are list of tuples. The first element is the # "anchor", the segment to look for either to edit or to insert BEFORE. # The second is the element to insert or create. linting_errors, _, fixes, _ = crawler.crawl( tree, ignore_mask=ignore_buff, dialect=config.get("dialect_obj"), fname=fname, templated_file=templated_file, ) all_linting_errors += linting_errors if fix and fixes: linter_logger.info(f"Applying Fixes [{crawler.code}]: {fixes}") # Do some sanity checks on the fixes before applying. 
if fixes == last_fixes: # pragma: no cover cls._warn_unfixable(crawler.code) else: last_fixes = fixes new_tree, _ = tree.apply_fixes(fixes) # Check for infinite loops if new_tree.raw not in previous_versions: # We've not seen this version of the file so far. Continue. tree = new_tree previous_versions.add(tree.raw) changed = True continue else: # Applying these fixes took us back to a state which we've # seen before. Abort. cls._warn_unfixable(crawler.code) if loop == 0: # Keep track of initial errors for reporting. initial_linting_errors = all_linting_errors.copy() if fix and not changed: # We did not change the file. Either the file is clean (no fixes), or # any fixes which are present will take us back to a previous state. linter_logger.info( f"Fix loop complete. Stability achieved after {loop}/{loop_limit} loops." ) break if fix and loop + 1 == loop_limit: linter_logger.warning(f"Loop limit on fixes reached [{loop_limit}].") if config.get("ignore_templated_areas", default=True): initial_linting_errors = cls.remove_templated_errors(initial_linting_errors) return tree, initial_linting_errors, ignore_buff @classmethod def lint_parsed( cls, parsed: ParsedString, rule_set: List[BaseRule], fix: bool = False, formatter: Any = None, encoding: str = "utf8", ): """Lint a ParsedString and return a LintedFile.""" violations = parsed.violations time_dict = parsed.time_dict tree: Optional[BaseSegment] if parsed.tree: t0 = time.monotonic() linter_logger.info("LINTING (%s)", parsed.fname) tree, initial_linting_errors, ignore_buff = cls.lint_fix_parsed( parsed.tree, config=parsed.config, rule_set=rule_set, fix=fix, fname=parsed.fname, templated_file=parsed.templated_file, formatter=formatter, ) # Update the timing dict time_dict["linting"] = time.monotonic() - t0 # We're only going to return the *initial* errors, rather # than any generated during the fixing cycle. violations += initial_linting_errors else: # If no parsed tree, set to None tree = None ignore_buff = [] # We process the ignore config here if appropriate for violation in violations: violation.ignore_if_in(parsed.config.get("ignore")) linted_file = LintedFile( parsed.fname, violations, time_dict, tree, ignore_mask=ignore_buff, templated_file=parsed.templated_file, encoding=encoding, ) # This is the main command line output from linting. if formatter: formatter.dispatch_file_violations( parsed.fname, linted_file, only_fixable=fix ) # Safety flag for unset dialects if parsed.config.get("dialect") == "ansi" and linted_file.get_violations( fixable=True if fix else None, types=SQLParseError ): if formatter: # pragma: no cover TODO? formatter.dispatch_dialect_warning() return linted_file @classmethod def lint_rendered( cls, rendered: RenderedFile, rule_set: List[BaseRule], fix: bool = False, formatter: Any = None, ) -> LintedFile: """Take a RenderedFile and return a LintedFile.""" parsed = cls.parse_rendered(rendered) return cls.lint_parsed( parsed, rule_set=rule_set, fix=fix, formatter=formatter, encoding=rendered.encoding, ) # ### Instance Methods # These are tied to a specific instance and so are not necessarily # safe to use in parallel operations. 
def render_string( self, in_str: str, fname: str, config: FluffConfig, encoding: str ) -> RenderedFile: """Template the file.""" linter_logger.info("TEMPLATING RAW [%s] (%s)", self.templater.name, fname) # Start the templating timer t0 = time.monotonic() if not config.get("templater_obj") == self.templater: linter_logger.warning( ( f"Attempt to set templater to {config.get('templater_obj').name} failed. Using {self.templater.name} " "templater. Templater cannot be set in a .sqlfluff file in a subdirectory of the current working " "directory. It can be set in a .sqlfluff in the current working directory. See Nesting section of the " "docs for more details." ) ) try: templated_file, templater_violations = self.templater.process( in_str=in_str, fname=fname, config=config, formatter=self.formatter ) except SQLTemplaterSkipFile as s: # pragma: no cover linter_logger.warning(str(s)) templated_file = None templater_violations = [] if not templated_file: linter_logger.info("TEMPLATING FAILED: %s", templater_violations) # Record time time_dict = {"templating": time.monotonic() - t0} return RenderedFile( templated_file, templater_violations, config, time_dict, fname, encoding ) def render_file(self, fname: str, root_config: FluffConfig) -> RenderedFile: """Load and render a file with relevant config.""" # Load the raw file. raw_file, config, encoding = self._load_raw_file_and_config(fname, root_config) # Render the file return self.render_string(raw_file, fname, config, encoding) def parse_string( self, in_str: str, fname: str = "<string>", recurse: bool = True, config: Optional[FluffConfig] = None, encoding: str = "utf-8", ) -> ParsedString: """Parse a string.""" violations: List[SQLBaseError] = [] # Dispatch the output for the template header (including the config diff) if self.formatter: self.formatter.dispatch_template_header(fname, self.config, config) # Just use the local config from here: config = config or self.config # Scan the raw file for config commands. config.process_raw_file_for_config(in_str) rendered = self.render_string(in_str, fname, config, encoding) violations += rendered.templater_violations # Dispatch the output for the parse header if self.formatter: self.formatter.dispatch_parse_header(fname) return self.parse_rendered(rendered, recurse=recurse) def fix( self, tree: BaseSegment, config: Optional[FluffConfig] = None, fname: Optional[str] = None, templated_file: Optional[TemplatedFile] = None, ) -> Tuple[BaseSegment, List[SQLBaseError]]: """Return the fixed tree and violations from lintfix when we're fixing.""" config = config or self.config rule_set = self.get_ruleset(config=config) fixed_tree, violations, _ = self.lint_fix_parsed( tree, config, rule_set, fix=True, fname=fname, templated_file=templated_file, formatter=self.formatter, ) return fixed_tree, violations def lint( self, tree: BaseSegment, config: Optional[FluffConfig] = None, fname: Optional[str] = None, templated_file: Optional[TemplatedFile] = None, ) -> List[SQLBaseError]: """Return just the violations from lintfix when we're only linting.""" config = config or self.config rule_set = self.get_ruleset(config=config) _, violations, _ = self.lint_fix_parsed( tree, config, rule_set, fix=False, fname=fname, templated_file=templated_file, formatter=self.formatter, ) return violations def lint_string( self, in_str: str = "", fname: str = "<string input>", fix: bool = False, config: Optional[FluffConfig] = None, encoding: str = "utf8", ) -> LintedFile: """Lint a string. 
Returns: :obj:`LintedFile`: an object representing that linted file. """ # Sort out config, defaulting to the built in config if no override config = config or self.config # Parse the string. parsed = self.parse_string(in_str=in_str, fname=fname, config=config) # Get rules as appropriate rule_set = self.get_ruleset(config=config) # Lint the file and return the LintedFile return self.lint_parsed( parsed, rule_set, fix=fix, formatter=self.formatter, encoding=encoding ) def paths_from_path( self, path: str, ignore_file_name: str = ".sqlfluffignore", ignore_non_existent_files: bool = False, ignore_files: bool = True, working_path: str = os.getcwd(), ) -> List[str]: """Return a set of sql file paths from a potentially more ambiguous path string. Here we also deal with the .sqlfluffignore file if present. When a path to a file to be linted is explicitly passed we look for ignore files in all directories that are parents of the file, up to the current directory. If the current directory is not a parent of the file we only look for an ignore file in the direct parent of the file. """ if not os.path.exists(path): if ignore_non_existent_files: return [] else: raise OSError("Specified path does not exist") # Files referred to exactly are also ignored if # matched, but we warn the users when that happens is_exact_file = os.path.isfile(path) if is_exact_file: # When the exact file to lint is passed, we # fill path_walk with an input that follows # the structure of `os.walk`: # (root, directories, files) dirpath = os.path.dirname(path) files = [os.path.basename(path)] ignore_file_paths = ConfigLoader.find_ignore_config_files( path=path, working_path=working_path, ignore_file_name=ignore_file_name ) # Add paths that could contain "ignore files" # to the path_walk list path_walk_ignore_file = [ ( os.path.dirname(ignore_file_path), None, # Only one possible file, since we only # have one "ignore file name" [os.path.basename(ignore_file_path)], ) for ignore_file_path in ignore_file_paths ] path_walk: WalkableType = [(dirpath, None, files)] + path_walk_ignore_file else: path_walk = os.walk(path) # If it's a directory then expand the path! buffer = [] ignore_set = set() for dirpath, _, filenames in path_walk: for fname in filenames: fpath = os.path.join(dirpath, fname) # Handle potential .sqlfluffignore files if ignore_files and fname == ignore_file_name: with open(fpath) as fh: spec = pathspec.PathSpec.from_lines("gitwildmatch", fh) matches = spec.match_tree(dirpath) for m in matches: ignore_path = os.path.join(dirpath, m) ignore_set.add(os.path.abspath(ignore_path)) # We don't need to process the ignore file any futher continue # We won't purge files *here* because there's an edge case # that the ignore file is processed after the sql file. # Scan for remaining files for ext in self.config.get("sql_file_exts", default=".sql").split(","): # is it a sql file? if fname.endswith(ext): buffer.append(fpath) if not ignore_files: return sorted(buffer) # Check the buffer for ignore items and normalise the rest. 
filtered_buffer = [] for fpath in buffer: if os.path.abspath(fpath) not in ignore_set: filtered_buffer.append(os.path.normpath(fpath)) elif is_exact_file: linter_logger.warning( "Exact file path %s was given but " "it was ignored by a %s pattern, " "re-run with `--disregard-sqlfluffignores` to " "skip %s" % ( path, ignore_file_name, ignore_file_name, ) ) # Return return sorted(filtered_buffer) def lint_string_wrapped( self, string: str, fname: str = "<string input>", fix: bool = False ) -> LintingResult: """Lint strings directly.""" result = LintingResult() linted_path = LintedDir(fname) linted_path.add(self.lint_string(string, fname=fname, fix=fix)) result.add(linted_path) result.stop_timer() return result def lint_path( self, path: str, fix: bool = False, ignore_non_existent_files: bool = False, ignore_files: bool = True, processes: int = 1, ) -> LintedDir: """Lint a path.""" linted_path = LintedDir(path) if self.formatter: self.formatter.dispatch_path(path) fnames = list( self.paths_from_path( path, ignore_non_existent_files=ignore_non_existent_files, ignore_files=ignore_files, ) ) runner = get_runner( self, self.config, processes=processes, allow_process_parallelism=self.allow_process_parallelism, ) for linted_file in runner.run(fnames, fix): linted_path.add(linted_file) # If any fatal errors, then stop iteration. if any(v.fatal for v in linted_file.violations): # pragma: no cover linter_logger.error("Fatal linting error. Halting further linting.") break return linted_path def lint_paths( self, paths: Tuple[str, ...], fix: bool = False, ignore_non_existent_files: bool = False, ignore_files: bool = True, processes: int = 1, ) -> LintingResult: """Lint an iterable of paths.""" # If no paths specified - assume local if len(paths) == 0: # pragma: no cover paths = (os.getcwd(),) # Set up the result to hold what we get back result = LintingResult() for path in paths: # Iterate through files recursively in the specified directory (if it's a directory) # or read the file directly if it's not result.add( self.lint_path( path, fix=fix, ignore_non_existent_files=ignore_non_existent_files, ignore_files=ignore_files, processes=processes, ) ) result.stop_timer() return result def parse_path( self, path: str, recurse: bool = True ) -> Generator[ParsedString, None, None]: """Parse a path of sql files. NB: This a generator which will yield the result of each file within the path iteratively. """ for fname in self.paths_from_path(path): if self.formatter: self.formatter.dispatch_path(path) # Load the file with the config and yield the result. raw_file, config, encoding = self._load_raw_file_and_config( fname, self.config ) yield self.parse_string( raw_file, fname=fname, recurse=recurse, config=config, encoding=encoding )
import clean
import phone_book as pb
import notes_book_1 as nb

# tuples with command words
EXIT_COMMANDS = ("good bye", "close", "exit", "bye")
FIND_COMMANDS = ("find",)
EDIT_COMMANDS = ("edit",)
BIRTHDAY_COMMANDS = ("birthday",)
SELECT_COMMANDS = ("select", "sel")
ADD_COMMANDS = ("add", "+")
DELETE_COMMANDS = ("delete", "del", "-")
GREETING_COMMANDS = ("hello", "alloha")
SHOW_ALL_COMMANDS = ("show all", "show")
HELP_COMMANDS = ("help",)

CURRENT_MODES = {'1': 'PhoneBook', '2': 'NotesBook', '3': 'SortFolder'}
CURRENT_MODE = ''
CURRENT_RECORD = None
CURRENT_ID = None


# helper functions
def loadAB():
    # Load once; fall back to a fresh AddressBook if nothing was saved yet.
    ab = pb.load_addressBook()
    if not ab:
        ab = pb.AddressBook()
    return ab


def load_obj_record(rec):
    if CURRENT_MODE == '1':
        return pb.Record(pb.Name(rec['name']), pb.Email(rec['email']),
                         pb.Address(rec['address']), pb.Birthday(rec['birthday']),
                         *(pb.Phone(i) for i in rec['phones']))
    elif CURRENT_MODE == '2':
        return nb.NoteRecord(nb.Note(rec['Note']), nb.Teg(rec['Teg']))


def loadNB():
    notesB = nb.load_notesBook()
    if not notesB:
        notesB = nb.NotesBook()
    return notesB


def check_id(value):
    return value.isdigit()


def input_name():
    while True:
        result = input('Contact name (required): ')
        if len(result) >= 3:
            return pb.Name(result)
        else:
            print("Name must have 3 or more characters!!")


def input_phone():
    while True:
        try:
            phone = pb.Phone(input('Contact phone (required): '))
            break
        except ValueError as e:
            print(e)
    return phone


def input_birthday():
    while True:
        try:
            birthday = pb.Birthday(
                input('Contact birthday "MM-DD" format (optional): '))
            break
        except ValueError as e:
            print(e)
    return birthday


def input_email():
    while True:
        try:
            email = pb.Email(input('Contact email (optional): '))
            break
        except ValueError as e:
            print(e)
    return email


def input_address():
    return pb.Address(input('New address: '))


def input_note():
    while True:
        try:
            note = nb.Note(input('Type your notes (required): '))
            break
        except ValueError as e:
            print(e)
    return note


def input_teg():
    while True:
        try:
            teg = nb.Teg(input('Type Teg for your notes (optional): '))
            break
        except ValueError as e:
            print(e)
    return teg


def add_contact(*args):
    ab = loadAB()
    name = input_name()
    address = pb.Address(input('Contact address (optional): '))
    phone = input_phone()
    birthday = input_birthday()
    email = input_email()
    record = pb.Record(name, address, phone, birthday, email)
    confirm = input(f'Add record {record} to address book (y/n)?: ')
    if confirm.lower() == 'y':
        ab.add_record(record)
        pb.save_addressBook(ab)


def delete_ab_record(record_id):
    if not record_id:
        return 'Sorry, to delete a record type its id after the command'
    ab = loadAB()
    ab.delete_record(int(record_id))
    pb.save_addressBook(ab)


def delete_nb_record(record_id):
    if not record_id:
        return 'Sorry, to delete a record type its id after the command'
    noteB = loadNB()
    noteB.delete_record(int(record_id))
    nb.save_notesBook(noteB)


def add_notes(*args):
    notesB = loadNB()
    note = input_note()
    teg = input_teg()
    record = nb.NoteRecord(note, teg)
    confirm = input(f'Add notes {record.noterecors} to NotesBook (y/n)?: ')
    if confirm.lower() == 'y':
        notesB.add_record(record)
        nb.save_notesBook(notesB)


def update_ab(record):
    global CURRENT_RECORD, CURRENT_ID
    delete_ab_record(CURRENT_ID)
    ab = loadAB()
    ab.add_record(record)
    pb.save_addressBook(ab)
    CURRENT_ID = None
    CURRENT_RECORD = None


def update_nb(record):
    global CURRENT_RECORD, CURRENT_ID
    delete_nb_record(CURRENT_ID)
    noteB = loadNB()
    noteB.add_record(record)
    nb.save_notesBook(noteB)
    CURRENT_ID = None
    CURRENT_RECORD = None


def phone_add(*args):
    if CURRENT_MODE == '1' and CURRENT_RECORD:
        phone = input_phone()
        CURRENT_RECORD.add_phone(phone)
        update_ab(CURRENT_RECORD)


# command functions
def add_command(*args):
    if CURRENT_MODE == '1' and not CURRENT_RECORD:
        add_contact()
    elif CURRENT_MODE == '1' and CURRENT_RECORD and args[0] == 'phone':
        phone_add()
    if CURRENT_MODE == '2':
        add_notes()


def birthday_command(*args):
    result = []
    if CURRENT_MODE == '1':
        ab = loadAB()
        for key, rec in ab.data.items():
            if args[0] != '':
                if not args[0].isdigit():
                    print('Parameter must be a digit')
                    return None
                if load_obj_record(rec).days_to_birthday() <= int(args[0]):
                    result.append(ab[key])
            else:
                result.append(
                    f'id - {key} | name - {rec["name"]} | birthday - {rec["birthday"]}')
        if len(result) == 0:
            result.append('You are a happy man \N{grinning face with smiling eyes}')
        for i in result:
            print(i)


def greeting_command(*args):
    print('in greeting_command')


def show_all_command(*args):
    if CURRENT_RECORD:
        print(CURRENT_RECORD)
    elif CURRENT_MODE == '1':
        print(loadAB())
    elif CURRENT_MODE == '2':
        print(loadNB())


def select_command(*args):
    global CURRENT_RECORD, CURRENT_ID
    if check_id(args[0]):
        CURRENT_ID = args[0]
    else:
        print(f'Parameter "id" must be a digit, not "{args[0]}"')
        return None
    if CURRENT_MODE == '1':
        ab = loadAB()
        try:
            CURRENT_RECORD = load_obj_record(ab.data[int(CURRENT_ID)])
        except KeyError as e:
            print(f'Sorry, PhoneBook has no record with id {e}')
    elif CURRENT_MODE == '2':
        noteB = loadNB()
        try:
            CURRENT_RECORD = load_obj_record(noteB.data[int(CURRENT_ID)])
        except KeyError as e:
            print(f'Sorry, NotesBook has no record with id {e}')


def edit_command(*args):
    if not CURRENT_RECORD:
        print('Before editing a record, please select it '
              '(command: "select id_record")')
        return None
    if CURRENT_MODE == '1' and args[0].lower() == 'phone':
        try:
            phone_id = args[1]
        except IndexError:
            print('Please type the phone id after the command')
            return None
        if not check_id(phone_id):
            print(f'Parameter "id" must be a digit, not "{phone_id}"')
            return None
        new_phone = input_phone()
        CURRENT_RECORD.delete_phone(int(phone_id))
        CURRENT_RECORD.add_phone(new_phone)
        update_ab(CURRENT_RECORD)
    elif CURRENT_MODE == '1' and args[0].lower() == 'name':
        CURRENT_RECORD.records['name'] = input_name().value
        update_ab(CURRENT_RECORD)
    elif CURRENT_MODE == '1' and args[0].lower() == 'birthday':
        CURRENT_RECORD.records['birthday'] = input_birthday().value
        update_ab(CURRENT_RECORD)
    elif CURRENT_MODE == '1' and args[0].lower() == 'email':
        CURRENT_RECORD.records['email'] = input_email().value
        update_ab(CURRENT_RECORD)
    elif CURRENT_MODE == '1' and args[0].lower() == 'address':
        CURRENT_RECORD.records['address'] = input_address().value
        update_ab(CURRENT_RECORD)
    elif CURRENT_MODE == '2' and args[0].lower() == 'note':
        CURRENT_RECORD.edit_note(input_note())
        update_nb(CURRENT_RECORD)
    elif CURRENT_MODE == '2' and args[0].lower() == 'teg':
        CURRENT_RECORD.edit_teg(input_teg())
        update_nb(CURRENT_RECORD)
    else:
        print(non_command())


def help_command(*args):
    if CURRENT_MODE == '1' or CURRENT_MODE == '2':
        print(f"""In {CURRENT_MODES[CURRENT_MODE]} mode.
You can see all records in your {CURRENT_MODES[CURRENT_MODE]} - just type the "show" command.
You can add, delete or edit records in your {CURRENT_MODES[CURRENT_MODE]}.
For adding type {ADD_COMMANDS}.
For deleting type {DELETE_COMMANDS} and specify the record id.
Before editing you must select a record: type {SELECT_COMMANDS} and specify the id.
""")
    elif CURRENT_MODE == '3':
        print(f"""In {CURRENT_MODES[CURRENT_MODE]} mode.
You can sort and organize your folders: just type the command "sort" and the PATH to a folder.
""")
    else:
        print(f"""I can work in different modes.
1. First - {CURRENT_MODES['1']} mode
2. Second - {CURRENT_MODES['2']} mode
3. Third - {CURRENT_MODES['3']} mode
In each mode you can call the command 'help' for more information.""")


def delete_command(*args):
    if CURRENT_MODE == '1' and not CURRENT_RECORD and args[0].isdigit():
        delete_ab_record(args[0])
    elif CURRENT_MODE == '1' and CURRENT_RECORD and args[0] == 'phone':
        CURRENT_RECORD.delete_phone(int(args[1]))
        update_ab(CURRENT_RECORD)
    elif CURRENT_MODE == '2' and not CURRENT_RECORD and args[0].isdigit():
        delete_nb_record(args[0])


def find_command(*args):
    if CURRENT_MODE == '1':
        ab = loadAB()
        try:
            for value in ab.find(args[0]):
                print(value)
        except ValueError as e:
            print(e)
    if CURRENT_MODE == '2':
        noteB = loadNB()
        try:
            for value in noteB.find_note(args[0]):
                print(value)
        except ValueError as e:
            print(e)


def exit_command(*args):
    global CURRENT_RECORD
    CURRENT_RECORD = None
    return 'exit'


COMMANDS = {ADD_COMMANDS: add_command,
            GREETING_COMMANDS: greeting_command,
            SHOW_ALL_COMMANDS: show_all_command,
            EXIT_COMMANDS: exit_command,
            HELP_COMMANDS: help_command,
            DELETE_COMMANDS: delete_command,
            SELECT_COMMANDS: select_command,
            EDIT_COMMANDS: edit_command,
            FIND_COMMANDS: find_command,
            BIRTHDAY_COMMANDS: birthday_command}


# general functions
def non_command():
    return "Sorry, I don't know this command"


def parse_data(command, aliases):
    for alias in aliases:
        if command.lower().startswith(alias):
            # Slice rather than str.replace, so that an alias occurring later
            # in the arguments (e.g. "add" inside a name) is left intact and
            # the match stays case-insensitive.
            data = command[len(alias):].strip()
            return data.split(' ')


def parse_command(command):
    com = command.lower()
    for aliases in COMMANDS:
        # str.startswith accepts a tuple of prefixes.
        if com.startswith(aliases):
            data = parse_data(command, aliases)
            return COMMANDS[aliases](*data)
    return non_command()


def work_mode(*args):
    global CURRENT_MODE
    if args[0] in CURRENT_MODES.keys():
        print(f'We are in {CURRENT_MODES[args[0]]} mode')
        CURRENT_MODE = args[0]
        while True:
            result = parse_command(
                input(f'({CURRENT_MODES[args[0]]} mode '
                      f'{"" if not CURRENT_RECORD else str(CURRENT_RECORD)}) type command: '))
            if result == 'exit':
                print("Good Bye!")
                CURRENT_MODE = ''
                break
            elif result:
                print(result)


if __name__ == '__main__':
    print('Hi! I\'m your personal helper (PH). For more information type "help"')
    while True:
        result = input('PH says - please, select a work mode or "exit": ')
        if result in ('1', '2', '3'):
            work_mode(result)
        else:
            result = parse_command(result)
            if result == 'exit':
                print("Good Bye!")
                break
            elif result:
                print(result)
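# --- Dispatch-pattern sketch (editor's addition, standalone). ---
# The helper above keys its COMMANDS table with *tuples* of aliases and leans
# on str.startswith accepting a tuple. A minimal self-contained rendition of
# that pattern, with hypothetical handlers, runnable on its own:

def _show(*args):
    return f"show called with {args}"

def _delete(*args):
    return f"delete called with {args}"

_DISPATCH_TABLE = {
    ("show all", "show"): _show,
    ("delete", "del", "-"): _delete,
}

def _dispatch(command: str) -> str:
    lowered = command.lower()
    for aliases, handler in _DISPATCH_TABLE.items():
        if lowered.startswith(aliases):  # startswith accepts a tuple of prefixes
            alias = next(a for a in aliases if lowered.startswith(a))
            # Slice off the matched alias; whatever remains are the arguments.
            return handler(*command[len(alias):].strip().split(" "))
    return "unknown command"

assert _dispatch("del 3") == "delete called with ('3',)"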
import json
from typing import Union, Dict
import webbrowser

from jose import jwt
from keycloak import KeycloakOpenID


class AuthenticationClient:
    """A facade for an authentication implementation, currently Keycloak."""

    issuer = None
    json_web_key_set: dict = {}

    def __init__(self, conf: Union[str, dict], redirect_url: str):
        """
        :param conf: Client configuration. Can be a path to a json file.
        :type conf: Union[str, dict]
        :param redirect_url: The url that the authentication server will redirect
            to after a successful login.
        :type redirect_url: str
        """
        self.config = self._get_config_from_file(conf) if isinstance(conf, str) else conf
        self._redirect_url = redirect_url
        self._client = keycloak_client(self.config)
        # NB: `well_know` (sic) is the method name exposed by python-keycloak.
        self.issuer = self._client.well_know()["issuer"]
        self.json_web_key_set = self._client.certs()

    def authentication_url(self) -> str:
        """The URL of the authentication server that is used to authenticate.

        :return: The request url including params.
        :rtype: str
        """
        return self._client.auth_url(self._redirect_url)

    def browser_login(self) -> None:
        """Open a browser window to login.

        :rtype: None
        """
        webbrowser.open(self.authentication_url())

    def logout(self, refresh_token: str) -> None:
        """Logout the user.

        :param refresh_token: The refresh token provided by the last
            authentication payload.
        :type refresh_token: str
        """
        self._client.logout(refresh_token)

    def token_using_auth_code(self, code: str) -> dict:
        """Obtain a token using an authorization code from the auth server.

        An authorization code is obtained from the auth server after a
        successful login.

        :param code: The authorization code.
        :type code: str
        :return: A dictionary containing the token, refresh token, and other info.
        :rtype: dict
        """
        return self._client.token(
            code=code,
            grant_type=["authorization_code"],
            redirect_uri=self._redirect_url,
        )

    def token_using_username_password(self, username: str, password: str) -> dict:
        """Obtain a token using username and password credentials.

        :param username: The user's login name.
        :type username: str
        :param password: The user's password.
        :type password: str
        :return: A dictionary containing the token, refresh token, and other info.
        :rtype: dict
        """
        return self._client.token(username=username, password=password)

    def users_info(self, auth_token: str) -> Dict[str, str]:
        import requests

        # Use the configured realm rather than a hard-coded one.
        users_response = requests.get(
            f"{self.config['auth-server-url']}/admin/realms/{self.config['realm']}/users",
            headers={"Authorization": f"Bearer {auth_token}"},
        ).json()
        return {info["id"]: info["username"] for info in users_response}

    def decode_jwt(self, auth_token: str) -> dict:
        unverified_header = jwt.get_unverified_header(auth_token)
        rsa_key = self._jwt_rsa_key(unverified_header)
        payload = jwt.decode(
            auth_token,
            rsa_key,
            algorithms=["RS256"],
            audience="account",
            issuer=self.issuer,
        )
        return payload

    def _jwt_rsa_key(self, unverified_header) -> Dict[str, str]:
        rsa_key: Dict[str, str] = {}
        for key in self.json_web_key_set["keys"]:
            if key["kid"] == unverified_header["kid"]:
                rsa_key = {
                    "kty": key["kty"],
                    "kid": key["kid"],
                    "use": key["use"],
                    "n": key["n"],
                    "e": key["e"],
                }
        return rsa_key

    @staticmethod
    def _get_config_from_file(fname: str) -> dict:
        with open(fname) as config_file:
            return json.load(config_file)


def keycloak_client(config: dict) -> KeycloakOpenID:
    creds = config.get("credentials", None)
    secret_key = creds["secret"] if creds else None
    return KeycloakOpenID(
        server_url=config["auth-server-url"] + "/",
        realm_name=config["realm"],
        client_id=config["resource"],
        client_secret_key=secret_key,
    )
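# --- Usage sketch (editor's addition, not part of the module above). ---
# A hedged illustration of the authorization-code flow with the facade above.
# The config file name, redirect URL, and the way `code` is captured from the
# redirect are hypothetical; they depend on the surrounding application.
#
# client = AuthenticationClient("keycloak.json", "http://localhost:8080/callback")
# client.browser_login()                        # user signs in via the browser
# tokens = client.token_using_auth_code(code)   # `code` captured from the redirect
# claims = client.decode_jwt(tokens["access_token"])
# print(claims["preferred_username"])           # standard OIDC claim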
import json
from typing import Union, Dict
import webbrowser

import requests
from jose import jwt
from keycloak import KeycloakOpenID


class AuthenticationClient:
    """A facade for the authentication implementation, currently keycloak."""

    issuer = None
    json_web_key_set: dict = {}

    def __init__(self, conf: Union[str, dict], redirect_url: str):
        """
        :param conf: Client configuration. Can be a path to a json file.
        :type conf: Union[str, dict]
        :param redirect_url: The url that the authentication server will redirect
            to after a successful login.
        :type redirect_url: str
        """
        self.config = self._get_config_from_file(conf) if isinstance(conf, str) else conf
        self._redirect_url = redirect_url
        self._client = keycloak_client(self.config)
        self.issuer = self._client.well_know()["issuer"]
        self.json_web_key_set = self._client.certs()

    def authentication_url(self) -> str:
        """The URL of the authentication server that is used to authenticate.

        :return: The request url including params.
        :rtype: str
        """
        return self._client.auth_url(self._redirect_url)

    def browser_login(self) -> None:
        """Open a browser window to login.

        :rtype: None
        """
        webbrowser.open(self.authentication_url())

    def logout(self, refresh_token: str) -> None:
        """Logout the user.

        :param refresh_token: The refresh token provided by the last authentication payload.
        :type refresh_token: str
        """
        self._client.logout(refresh_token)

    def token_using_auth_code(self, code: str) -> dict:
        """Obtain a token using an authorization code from the auth server.

        An authorization code is obtained from the auth server after a successful login.

        :param code: The authorization code.
        :type code: str
        :return: A dictionary containing the token, refresh token, and other info.
        :rtype: dict
        """
        return self._client.token(
            code=code,
            grant_type=["authorization_code"],
            redirect_uri=self._redirect_url,
        )

    def token_using_username_password(self, username: str, password: str) -> dict:
        """Obtain a token using the resource owner's username and password.

        :param username: The username to authenticate as.
        :type username: str
        :param password: The password for that username.
        :type password: str
        :return: A dictionary containing the token, refresh token, and other info.
        :rtype: dict
        """
        return self._client.token(username=username, password=password)

    def users_info(self, auth_token: str) -> Dict[str, str]:
        users_response = requests.get(
            f"{self.config['auth-server-url']}/admin/realms/Atlas/users",
            headers={"Authorization": f"Bearer {auth_token}"},
        ).json()
        return {info["id"]: info["username"] for info in users_response}

    def decode_jwt(self, auth_token: str) -> dict:
        unverified_header = jwt.get_unverified_header(auth_token)
        rsa_key = self._jwt_rsa_key(unverified_header)
        payload = jwt.decode(
            auth_token,
            rsa_key,
            algorithms=["RS256"],
            audience="account",
            issuer=self.issuer,
        )
        return payload

    def _jwt_rsa_key(self, unverified_header) -> Dict[str, str]:
        rsa_key: Dict[str, str] = {}
        for key in self.json_web_key_set["keys"]:
            if key["kid"] == unverified_header["kid"]:
                rsa_key = {
                    "kty": key["kty"],
                    "kid": key["kid"],
                    "use": key["use"],
                    "n": key["n"],
                    "e": key["e"],
                }
        return rsa_key

    @staticmethod
    def _get_config_from_file(fname: str) -> dict:
        with open(fname) as config_file:
            return json.load(config_file)


def keycloak_client(config: dict) -> KeycloakOpenID:
    creds = config.get("credentials", None)
    secret_key = creds["secret"] if creds else None
    return KeycloakOpenID(
        server_url=config["auth-server-url"] + "/",
        realm_name=config["realm"],
        client_id=config["resource"],
        client_secret_key=secret_key,
    )
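A minimal usage sketch for AuthenticationClient, assuming a reachable Keycloak server. The config keys mirror the keycloak.json adapter format the class reads ("auth-server-url", "realm", "resource", "credentials"); every URL, realm, and secret below is a placeholder:

# Hedged usage sketch: constructing the client already talks to the
# server (well_know() and certs()), so this only works against a live
# Keycloak instance. All values are placeholders.
conf = {
    "auth-server-url": "https://auth.example.com/auth",
    "realm": "Atlas",
    "resource": "my-client",
    "credentials": {"secret": "client-secret"},
}
client = AuthenticationClient(conf, redirect_url="http://localhost:8080/callback")
client.browser_login()  # opens the hosted login page

# after the redirect delivers ?code=... back to the app:
tokens = client.token_using_auth_code(code="auth-code-from-redirect")
claims = client.decode_jwt(tokens["access_token"])
client.logout(tokens["refresh_token"])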
""" Copyright (c) 2018 iCyP Released under the MIT license https://opensource.org/licenses/mit-license.php """ from .. import V_Types as VRM_Types def bone(node) -> VRM_Types.Node: v_node = VRM_Types.Node() if "name" in node: v_node.name = node["name"] else: v_node.name = "tmp" v_node.position = node["translation"] v_node.rotation = node.get("rotation", (0, 0, 0, 1)) v_node.scale = node.get("scale", (1, 1, 1)) if "children" in node: if type(node["children"]) is int: v_node.children = [node["children"]] else: v_node.children = node["children"] else: v_node.children = None if "mesh" in node: v_node.mesh_id = node["mesh"] if "skin" in node: v_node.skin_id = node["skin"] return v_node def material(mat, ext_mat, use_simple_principled_material) -> VRM_Types.Material: # standard, or VRM unsupported shader(no saved) if ( ext_mat["shader"] == "VRM_USE_GLTFSHADER" or ext_mat["shader"] not in ["VRM/MToon", "VRM/UnlitTransparentZWrite"] or use_simple_principled_material ): v_mat = VRM_Types.Material_GLTF() v_mat.name = mat["name"] v_mat.shader_name = "gltf" if "pbrMetallicRoughness" in mat: pbrmat = mat["pbrMetallicRoughness"] if "baseColorTexture" in pbrmat: texture_index = pbrmat["baseColorTexture"]["index"] v_mat.color_texture_index = texture_index v_mat.color_texcoord_index = pbrmat["baseColorTexture"]["texCoord"] if "baseColorFactor" in pbrmat: v_mat.base_color = pbrmat["baseColorFactor"] if "metallicFactor" in pbrmat: v_mat.metallic_factor = pbrmat["metallicFactor"] if "roughnessFactor" in pbrmat: v_mat.roughness_factor = pbrmat["roughnessFactor"] if "metallicRoughnessTexture" in pbrmat: texture_index = pbrmat["metallicRoughnessTexture"]["index"] v_mat.metallic_roughness_texture_index = texture_index v_mat.metallic_roughness_texture_texcoord = pbrmat["baseColorTexture"][ "texCoord" ] if "normalTexture" in mat: v_mat.normal_texture_index = mat["normalTexture"]["index"] v_mat.normal_texture_texcoord_index = mat["normalTexture"]["texCoord"] if "emissiveTexture" in mat: v_mat.emissive_texture_index = mat["emissiveTexture"]["index"] v_mat.emissive_texture_texcoord_index = mat["emissiveTexture"]["texCoord"] if "occlusionTexture" in mat: v_mat.occlusion_texture_index = mat["occlusionTexture"]["index"] v_mat.occlusion_texture_texcoord_index = mat["occlusionTexture"]["texCoord"] if "emissiveFactor" in mat: v_mat.emissive_factor = mat["emissiveFactor"] if "doubleSided" in mat: v_mat.double_sided = mat["doubleSided"] if "alphaMode" in mat: if mat["alphaMode"] == "MASK": v_mat.alpha_mode = "MASK" if mat.get("alphaCutoff"): v_mat.alphaCutoff = mat.get("alphaCutoff") else: v_mat.alphaCutoff = 0.5 elif mat["alphaMode"] == "BLEND": v_mat.alpha_mode = "Z_TRANSPARENCY" elif mat["alphaMode"] == "OPAQUE": v_mat.alpha_mode = "OPAQUE" if "extensions" in mat: if "KHR_materials_unlit" in mat["extensions"]: v_mat.shadeless = 1 # 0 is shade ,1 is shadeless else: # "MToon or Transparent_Zwrite" if ext_mat["shader"] == "VRM/MToon": v_mat = VRM_Types.Material_MToon() v_mat.name = ext_mat["name"] v_mat.shader_name = ext_mat["shader"] # region check unknown props exist subset = { "float": ext_mat["floatProperties"].keys() - v_mat.float_props_dic.keys(), "vector": ext_mat["vectorProperties"].keys() - v_mat.vector_props_dic.keys(), "texture": ext_mat["textureProperties"].keys() - v_mat.texture_index_dic.keys(), "keyword": ext_mat["keywordMap"].keys() - v_mat.keyword_dic.keys(), } for k, _subset in subset.items(): if _subset: print( "unknown {} properties {} in {}".format( k, _subset, ext_mat["name"] ) ) # endregion check 
unknown props exit v_mat.float_props_dic.update(ext_mat["floatProperties"]) v_mat.vector_props_dic.update(ext_mat["vectorProperties"]) v_mat.texture_index_dic.update(ext_mat["textureProperties"]) v_mat.keyword_dic.update(ext_mat["keywordMap"]) v_mat.tag_dic.update(ext_mat["tagMap"]) elif ext_mat["shader"] == "VRM/UnlitTransparentZWrite": v_mat = VRM_Types.Material_Transparent_Z_write() v_mat.name = ext_mat["name"] v_mat.shader_name = ext_mat["shader"] v_mat.float_props_dic = ext_mat["floatProperties"] v_mat.vector_props_dic = ext_mat["vectorProperties"] v_mat.texture_index_dic = ext_mat["textureProperties"] else: # ここには入らないはず print( f"Unknown(or legacy) shader :material {ext_mat["name"]} is {ext_mat["shader"]}" ) return v_mat
""" Copyright (c) 2018 iCyP Released under the MIT license https://opensource.org/licenses/mit-license.php """ from .. import V_Types as VRM_Types def bone(node) -> VRM_Types.Node: v_node = VRM_Types.Node() if "name" in node: v_node.name = node["name"] else: v_node.name = "tmp" v_node.position = node["translation"] v_node.rotation = node.get("rotation", (0, 0, 0, 1)) v_node.scale = node.get("scale", (1, 1, 1)) if "children" in node: if type(node["children"]) is int: v_node.children = [node["children"]] else: v_node.children = node["children"] else: v_node.children = None if "mesh" in node: v_node.mesh_id = node["mesh"] if "skin" in node: v_node.skin_id = node["skin"] return v_node def material(mat, ext_mat, use_simple_principled_material) -> VRM_Types.Material: # standard, or VRM unsupported shader(no saved) if ( ext_mat["shader"] == "VRM_USE_GLTFSHADER" or ext_mat["shader"] not in ["VRM/MToon", "VRM/UnlitTransparentZWrite"] or use_simple_principled_material ): v_mat = VRM_Types.Material_GLTF() v_mat.name = mat["name"] v_mat.shader_name = "gltf" if "pbrMetallicRoughness" in mat: pbrmat = mat["pbrMetallicRoughness"] if "baseColorTexture" in pbrmat: texture_index = pbrmat["baseColorTexture"]["index"] v_mat.color_texture_index = texture_index v_mat.color_texcoord_index = pbrmat["baseColorTexture"]["texCoord"] if "baseColorFactor" in pbrmat: v_mat.base_color = pbrmat["baseColorFactor"] if "metallicFactor" in pbrmat: v_mat.metallic_factor = pbrmat["metallicFactor"] if "roughnessFactor" in pbrmat: v_mat.roughness_factor = pbrmat["roughnessFactor"] if "metallicRoughnessTexture" in pbrmat: texture_index = pbrmat["metallicRoughnessTexture"]["index"] v_mat.metallic_roughness_texture_index = texture_index v_mat.metallic_roughness_texture_texcoord = pbrmat["baseColorTexture"][ "texCoord" ] if "normalTexture" in mat: v_mat.normal_texture_index = mat["normalTexture"]["index"] v_mat.normal_texture_texcoord_index = mat["normalTexture"]["texCoord"] if "emissiveTexture" in mat: v_mat.emissive_texture_index = mat["emissiveTexture"]["index"] v_mat.emissive_texture_texcoord_index = mat["emissiveTexture"]["texCoord"] if "occlusionTexture" in mat: v_mat.occlusion_texture_index = mat["occlusionTexture"]["index"] v_mat.occlusion_texture_texcoord_index = mat["occlusionTexture"]["texCoord"] if "emissiveFactor" in mat: v_mat.emissive_factor = mat["emissiveFactor"] if "doubleSided" in mat: v_mat.double_sided = mat["doubleSided"] if "alphaMode" in mat: if mat["alphaMode"] == "MASK": v_mat.alpha_mode = "MASK" if mat.get("alphaCutoff"): v_mat.alphaCutoff = mat.get("alphaCutoff") else: v_mat.alphaCutoff = 0.5 elif mat["alphaMode"] == "BLEND": v_mat.alpha_mode = "Z_TRANSPARENCY" elif mat["alphaMode"] == "OPAQUE": v_mat.alpha_mode = "OPAQUE" if "extensions" in mat: if "KHR_materials_unlit" in mat["extensions"]: v_mat.shadeless = 1 # 0 is shade ,1 is shadeless else: # "MToon or Transparent_Zwrite" if ext_mat["shader"] == "VRM/MToon": v_mat = VRM_Types.Material_MToon() v_mat.name = ext_mat["name"] v_mat.shader_name = ext_mat["shader"] # region check unknown props exist subset = { "float": ext_mat["floatProperties"].keys() - v_mat.float_props_dic.keys(), "vector": ext_mat["vectorProperties"].keys() - v_mat.vector_props_dic.keys(), "texture": ext_mat["textureProperties"].keys() - v_mat.texture_index_dic.keys(), "keyword": ext_mat["keywordMap"].keys() - v_mat.keyword_dic.keys(), } for k, _subset in subset.items(): if _subset: print( "unknown {} properties {} in {}".format( k, _subset, ext_mat["name"] ) ) # endregion check 
unknown props exit v_mat.float_props_dic.update(ext_mat["floatProperties"]) v_mat.vector_props_dic.update(ext_mat["vectorProperties"]) v_mat.texture_index_dic.update(ext_mat["textureProperties"]) v_mat.keyword_dic.update(ext_mat["keywordMap"]) v_mat.tag_dic.update(ext_mat["tagMap"]) elif ext_mat["shader"] == "VRM/UnlitTransparentZWrite": v_mat = VRM_Types.Material_Transparent_Z_write() v_mat.name = ext_mat["name"] v_mat.shader_name = ext_mat["shader"] v_mat.float_props_dic = ext_mat["floatProperties"] v_mat.vector_props_dic = ext_mat["vectorProperties"] v_mat.texture_index_dic = ext_mat["textureProperties"] else: # ここには入らないはず print( f"Unknown(or legacy) shader :material {ext_mat['name']} is {ext_mat['shader']}" ) return v_mat
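The unknown-property check in material() relies on dict .keys() views behaving like sets, so subtracting the schema of known keys leaves exactly the unrecognized names. A standalone illustration with made-up property names:

# dict.keys() returns a set-like view, so "-" yields the keys present in
# the incoming data but absent from the known-property schema.
known_floats = {"_Cutoff": 0.5, "_BumpScale": 1.0}
incoming = {"_Cutoff": 0.7, "_MysteryProp": 3.0}

unknown = incoming.keys() - known_floats.keys()
if unknown:
    print(f"unknown float properties {unknown}")  # {'_MysteryProp'}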
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """Generic utility helpers.""" import contextlib import inspect import logging import os from argparse import Namespace from collections.abc import ItemsView from functools import lru_cache from pathlib import Path from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union import yaml from ansible import constants from ansible.errors import AnsibleError, AnsibleParserError from ansible.parsing.dataloader import DataLoader from ansible.parsing.mod_args import ModuleArgsParser from ansible.parsing.splitter import split_args from ansible.parsing.yaml.constructor import AnsibleConstructor from ansible.parsing.yaml.loader import AnsibleLoader from ansible.parsing.yaml.objects import AnsibleSequence from ansible.plugins.loader import add_all_plugin_dirs from ansible.template import Templar try: from ansible.module_utils.parsing.convert_bool import boolean except ImportError: try: from ansible.utils.boolean import boolean except ImportError: try: from ansible.utils import boolean except ImportError: boolean = constants.mk_boolean from yaml.composer import Composer from yaml.representer import RepresenterError from ansiblelint._internal.rules import ( AnsibleParserErrorRule, LoadingFailureRule, RuntimeErrorRule, ) from ansiblelint.constants import FileType from ansiblelint.errors import MatchError from ansiblelint.file_utils import Lintable, get_yaml_files # ansible-lint doesn't need/want to know about encrypted secrets, so we pass a # string as the password to enable such yaml files to be opened and parsed # successfully. 
DEFAULT_VAULT_PASSWORD = 'x' PLAYBOOK_DIR = os.environ.get('ANSIBLE_PLAYBOOK_DIR', None) _logger = logging.getLogger(__name__) def parse_yaml_from_file(filepath: str) -> dict: dl = DataLoader() if hasattr(dl, 'set_vault_password'): dl.set_vault_password(DEFAULT_VAULT_PASSWORD) return dl.load_from_file(filepath) def path_dwim(basedir: str, given: str) -> str: dl = DataLoader() dl.set_basedir(basedir) return dl.path_dwim(given) def ansible_template(basedir: str, varname: Any, templatevars, **kwargs) -> Any: dl = DataLoader() dl.set_basedir(basedir) templar = Templar(dl, variables=templatevars) return templar.template(varname, **kwargs) LINE_NUMBER_KEY = '__line__' FILENAME_KEY = '__file__' VALID_KEYS = [ 'name', 'action', 'when', 'async', 'poll', 'notify', 'first_available_file', 'include', 'include_tasks', 'import_tasks', 'import_playbook', 'tags', 'register', 'ignore_errors', 'delegate_to', 'local_action', 'transport', 'remote_user', 'sudo', 'sudo_user', 'sudo_pass', 'when', 'connection', 'environment', 'args', 'any_errors_fatal', 'changed_when', 'failed_when', 'check_mode', 'delay', 'retries', 'until', 'su', 'su_user', 'su_pass', 'no_log', 'run_once', 'become', 'become_user', 'become_method', FILENAME_KEY, ] BLOCK_NAME_TO_ACTION_TYPE_MAP = { 'tasks': 'task', 'handlers': 'handler', 'pre_tasks': 'task', 'post_tasks': 'task', 'block': 'meta', 'rescue': 'meta', 'always': 'meta', } def tokenize(line: str) -> Tuple[str, List[str], Dict]: tokens = line.lstrip().split(" ") if tokens[0] == '-': tokens = tokens[1:] if tokens[0] == 'action:' or tokens[0] == 'local_action:': tokens = tokens[1:] command = tokens[0].replace(":", "") args = list() kwargs = dict() nonkvfound = False for arg in tokens[1:]: if "=" in arg and not nonkvfound: kv = arg.split("=", 1) kwargs[kv[0]] = kv[1] else: nonkvfound = True args.append(arg) return (command, args, kwargs) def _playbook_items(pb_data: dict) -> ItemsView: if isinstance(pb_data, dict): return pb_data.items() if not pb_data: return [] # "if play" prevents failure if the play sequence contains None, # which is weird but currently allowed by Ansible # https://github.com/ansible-community/ansible-lint/issues/849 return [item for play in pb_data if play for item in play.items()] def _set_collections_basedir(basedir: str): # Sets the playbook directory as playbook_paths for the collection loader try: # Ansible 2.10+ # noqa: # pylint:disable=cyclic-import,import-outside-toplevel from ansible.utils.collection_loader import AnsibleCollectionConfig AnsibleCollectionConfig.playbook_paths = basedir except ImportError: # Ansible 2.8 or 2.9 # noqa: # pylint:disable=cyclic-import,import-outside-toplevel from ansible.utils.collection_loader import set_collection_playbook_paths set_collection_playbook_paths(basedir) def find_children(lintable: Lintable) -> List[Lintable]: # noqa: C901 if not lintable.path.exists(): return [] playbook_dir = str(lintable.path.parent) _set_collections_basedir(playbook_dir or os.path.abspath('.')) add_all_plugin_dirs(playbook_dir or '.') if lintable.kind == 'role': playbook_ds = {'roles': [{'role': str(lintable.path)}]} elif lintable.kind not in ("playbook", "tasks"): return [] else: try: playbook_ds = parse_yaml_from_file(str(lintable.path)) except AnsibleError as e: raise SystemExit(str(e)) results = [] basedir = os.path.dirname(str(lintable.path)) # playbook_ds can be an AnsibleUnicode string, which we consider invalid if isinstance(playbook_ds, str): raise MatchError(filename=str(lintable.path), rule=LoadingFailureRule) for item in 
_playbook_items(playbook_ds): for child in play_children(basedir, item, lintable.kind, playbook_dir): # We avoid processing parametrized children path_str = str(child.path) if "$" in path_str or "{{" in path_str: continue # Repair incorrect paths obtained when old syntax was used, like: # - include: simpletask.yml tags=nginx valid_tokens = list() for token in split_args(path_str): if '=' in token: break valid_tokens.append(token) path = ' '.join(valid_tokens) if path != path_str: child.path = Path(path) child.name = child.path.name results.append(child) return results def template( basedir: str, value: Any, variables, fail_on_undefined=False, **kwargs ) -> Any: try: value = ansible_template( os.path.abspath(basedir), value, variables, **dict(kwargs, fail_on_undefined=fail_on_undefined), ) # Hack to skip the following exception when using to_json filter on a variable. # I guess the filter doesn't like empty vars... except (AnsibleError, ValueError, RepresenterError): # templating failed, so just keep value as is. pass return value def play_children( basedir: str, item: Tuple[str, Any], parent_type, playbook_dir ) -> List[Lintable]: delegate_map: Dict[str, Callable[[str, Any, Any, FileType], List[Lintable]]] = { 'tasks': _taskshandlers_children, 'pre_tasks': _taskshandlers_children, 'post_tasks': _taskshandlers_children, 'block': _taskshandlers_children, 'include': _include_children, 'import_playbook': _include_children, 'roles': _roles_children, 'dependencies': _roles_children, 'handlers': _taskshandlers_children, 'include_tasks': _include_children, 'import_tasks': _include_children, } (k, v) = item add_all_plugin_dirs(os.path.abspath(basedir)) if k in delegate_map: if v: v = template( os.path.abspath(basedir), v, dict(playbook_dir=PLAYBOOK_DIR or os.path.abspath(basedir)), fail_on_undefined=False, ) return delegate_map[k](basedir, k, v, parent_type) return [] def _include_children(basedir: str, k, v, parent_type) -> List[Lintable]: # handle special case include_tasks: name=filename.yml if k == 'include_tasks' and isinstance(v, dict) and 'file' in v: v = v['file'] # handle include: filename.yml tags=blah (command, args, kwargs) = tokenize("{0}: {1}".format(k, v)) result = path_dwim(basedir, args[0]) if not os.path.exists(result): result = path_dwim(os.path.join(os.path.dirname(basedir)), v) return [Lintable(result, kind=parent_type)] def _taskshandlers_children(basedir, k, v, parent_type: FileType) -> List[Lintable]: results: List[Lintable] = [] if v is None: raise MatchError( message="A malformed block was encountered while loading a block.", rule=RuntimeErrorRule(), ) for th in v: # ignore empty tasks, `-` if not th: continue with contextlib.suppress(LookupError): children = _get_task_handler_children_for_tasks_or_playbooks( th, basedir, k, parent_type, ) results.append(children) continue if ( 'include_role' in th or 'import_role' in th ): # lgtm [py/unreachable-statement] th = normalize_task_v2(th) _validate_task_handler_action_for_role(th['action']) results.extend( _roles_children( basedir, k, [th['action'].get("name")], parent_type, main=th['action'].get('tasks_from', 'main'), ) ) continue if 'block' not in th: continue results.extend(_taskshandlers_children(basedir, k, th['block'], parent_type)) if 'rescue' in th: results.extend( _taskshandlers_children(basedir, k, th['rescue'], parent_type) ) if 'always' in th: results.extend( _taskshandlers_children(basedir, k, th['always'], parent_type) ) return results def _get_task_handler_children_for_tasks_or_playbooks( task_handler, basedir: 
str, k, parent_type: FileType, ) -> Lintable: """Try to get children of taskhandler for include/import tasks/playbooks.""" child_type = k if parent_type == 'playbook' else parent_type task_include_keys = 'include', 'include_tasks', 'import_playbook', 'import_tasks' for task_handler_key in task_include_keys: with contextlib.suppress(KeyError): # ignore empty tasks if not task_handler: continue return Lintable( path_dwim(basedir, task_handler[task_handler_key]), kind=child_type ) raise LookupError( f'The node contains none of: {", ".join(task_include_keys)}', ) def _validate_task_handler_action_for_role(th_action: dict) -> None: """Verify that the task handler action is valid for role include.""" module = th_action['__ansible_module__'] if 'name' not in th_action: raise MatchError(message=f"Failed to find required 'name' key in {module!s}") if not isinstance(th_action['name'], str): raise MatchError( message=f"Value assigned to 'name' key on '{module!s}' is not a string.", ) def _roles_children( basedir: str, k, v, parent_type: FileType, main='main' ) -> List[Lintable]: results: List[Lintable] = [] for role in v: if isinstance(role, dict): if 'role' in role or 'name' in role: if 'tags' not in role or 'skip_ansible_lint' not in role['tags']: results.extend( _look_for_role_files( basedir, role.get('role', role.get('name')), main=main ) ) elif k != 'dependencies': raise SystemExit( 'role dict {0} does not contain a "role" ' 'or "name" key'.format(role) ) else: results.extend(_look_for_role_files(basedir, role, main=main)) return results def _rolepath(basedir: str, role: str) -> Optional[str]: role_path = None possible_paths = [ # if included from a playbook path_dwim(basedir, os.path.join('roles', role)), path_dwim(basedir, role), # if included from roles/[role]/meta/main.yml path_dwim(basedir, os.path.join('..', '..', '..', 'roles', role)), path_dwim(basedir, os.path.join('..', '..', role)), # if checking a role in the current directory path_dwim(basedir, os.path.join('..', role)), ] if constants.DEFAULT_ROLES_PATH: search_locations = constants.DEFAULT_ROLES_PATH if isinstance(search_locations, str): search_locations = search_locations.split(os.pathsep) for loc in search_locations: loc = os.path.expanduser(loc) possible_paths.append(path_dwim(loc, role)) possible_paths.append(path_dwim(basedir, '')) for path_option in possible_paths: if os.path.isdir(path_option): role_path = path_option break if role_path: add_all_plugin_dirs(role_path) return role_path def _look_for_role_files(basedir: str, role: str, main='main') -> List[Lintable]: role_path = _rolepath(basedir, role) if not role_path: return [] results = [] for kind in ['tasks', 'meta', 'handlers']: current_path = os.path.join(role_path, kind) for folder, subdirs, files in os.walk(current_path): for file in files: file_ignorecase = file.lower() if file_ignorecase.endswith(('.yml', '.yaml')): thpath = os.path.join(folder, file) # TODO(ssbarnea): Find correct way to pass kind: FileType results.append(Lintable(thpath, kind=kind)) # type: ignore return results def rolename(filepath): idx = filepath.find('roles/') if idx < 0: return '' role = filepath[idx + 6 :] role = role[: role.find('/')] return role def _kv_to_dict(v): (command, args, kwargs) = tokenize(v) return dict(__ansible_module__=command, __ansible_arguments__=args, **kwargs) def _sanitize_task(task: dict) -> dict: """Return a stripped-off task structure compatible with new Ansible. This helper takes a copy of the incoming task and drops any internally used keys from it. 
""" result = task.copy() # task is an AnsibleMapping which inherits from OrderedDict, so we need # to use `del` to remove unwanted keys. for k in ['skipped_rules', FILENAME_KEY, LINE_NUMBER_KEY]: if k in result: del result[k] return result def normalize_task_v2(task: Dict[str, Any]) -> Dict[str, Any]: """Ensure tasks have an action key and strings are converted to python objects.""" result = dict() sanitized_task = _sanitize_task(task) mod_arg_parser = ModuleArgsParser(sanitized_task) try: action, arguments, result['delegate_to'] = mod_arg_parser.parse() except AnsibleParserError as e: raise MatchError( rule=AnsibleParserErrorRule(), message=e.message, filename=task.get(FILENAME_KEY, "Unknown"), linenumber=task.get(LINE_NUMBER_KEY, 0), ) # denormalize shell -> command conversion if '_uses_shell' in arguments: action = 'shell' del arguments['_uses_shell'] for (k, v) in list(task.items()): if k in ('action', 'local_action', 'args', 'delegate_to') or k == action: # we don't want to re-assign these values, which were # determined by the ModuleArgsParser() above continue result[k] = v result['action'] = dict(__ansible_module__=action) if '_raw_params' in arguments: result['action']['__ansible_arguments__'] = arguments['_raw_params'].split(' ') del arguments['_raw_params'] else: result['action']['__ansible_arguments__'] = list() if 'argv' in arguments and not result['action']['__ansible_arguments__']: result['action']['__ansible_arguments__'] = arguments['argv'] del arguments['argv'] result['action'].update(arguments) return result def normalize_task_v1(task): # noqa: C901 result = dict() for (k, v) in task.items(): if k in VALID_KEYS or k.startswith('with_'): if k in ('local_action', 'action'): if not isinstance(v, dict): v = _kv_to_dict(v) v['__ansible_arguments__'] = v.get('__ansible_arguments__', list()) result['action'] = v else: result[k] = v else: if isinstance(v, str): v = _kv_to_dict(k + ' ' + v) elif not v: v = dict(__ansible_module__=k) else: if isinstance(v, dict): v.update(dict(__ansible_module__=k)) else: if k == '__line__': # Keep the line number stored result[k] = v continue # Tasks that include playbooks (rather than task files) # can get here # https://github.com/ansible-community/ansible-lint/issues/138 raise RuntimeError( "Was not expecting value %s of type %s for key %s\n" "Task: %s. Check the syntax of your playbook using " "ansible-playbook --syntax-check" % (str(v), type(v), k, str(task)) ) v['__ansible_arguments__'] = v.get('__ansible_arguments__', list()) result['action'] = v if 'module' in result['action']: # this happens when a task uses # local_action: # module: ec2 # etc... 
result['action']['__ansible_module__'] = result['action']['module'] del result['action']['module'] if 'args' in result: result['action'].update(result.get('args')) del result['args'] return result def normalize_task(task: Dict[str, Any], filename: str) -> Dict[str, Any]: ansible_action_type = task.get('__ansible_action_type__', 'task') if '__ansible_action_type__' in task: del task['__ansible_action_type__'] task = normalize_task_v2(task) task[FILENAME_KEY] = filename task['__ansible_action_type__'] = ansible_action_type return task def task_to_str(task: Dict[str, Any]) -> str: name = task.get("name") if name: return str(name) action = task.get("action") if isinstance(action, str) or not isinstance(action, dict): return str(action) args = " ".join( [ "{0}={1}".format(k, v) for (k, v) in action.items() if k not in [ "__ansible_module__", "__ansible_arguments__", "__line__", "__file__", ] ] ) for item in action.get("__ansible_arguments__", []): args += f" {item}" return u"{0} {1}".format(action["__ansible_module__"], args) def extract_from_list(blocks, candidates: List[str]) -> List[Any]: results = list() for block in blocks: for candidate in candidates: if isinstance(block, dict) and candidate in block: if isinstance(block[candidate], list): results.extend(add_action_type(block[candidate], candidate)) elif block[candidate] is not None: raise RuntimeError( "Key '%s' defined, but bad value: '%s'" % (candidate, str(block[candidate])) ) return results def add_action_type(actions, action_type: str) -> List[Any]: results = list() for action in actions: # ignore empty task if not action: continue action['__ansible_action_type__'] = BLOCK_NAME_TO_ACTION_TYPE_MAP[action_type] results.append(action) return results def get_action_tasks(yaml, file: Lintable) -> List[Any]: tasks = list() if file.kind in ['tasks', 'handlers']: tasks = add_action_type(yaml, file.kind) else: tasks.extend( extract_from_list(yaml, ['tasks', 'handlers', 'pre_tasks', 'post_tasks']) ) # Add sub-elements of block/rescue/always to tasks list tasks.extend(extract_from_list(tasks, ['block', 'rescue', 'always'])) # Remove block/rescue/always elements from tasks list block_rescue_always = ('block', 'rescue', 'always') tasks[:] = [ task for task in tasks if all(k not in task for k in block_rescue_always) ] return [ task for task in tasks if set( ['include', 'include_tasks', 'import_playbook', 'import_tasks'] ).isdisjoint(task.keys()) ] def get_normalized_tasks(yaml, file: Lintable) -> List[Dict[str, Any]]: tasks = get_action_tasks(yaml, file) res = [] for task in tasks: # An empty `tags` block causes `None` to be returned if # the `or []` is not present - `task.get('tags', [])` # does not suffice. if 'skip_ansible_lint' in (task.get('tags') or []): # No need to normalize_task is we are skipping it. continue res.append(normalize_task(task, str(file.path))) return res @lru_cache(maxsize=128) def parse_yaml_linenumbers(data, filename): """Parse yaml as ansible.utils.parse_yaml but with linenumbers. The line numbers are stored in each node's LINE_NUMBER_KEY key. 
""" def compose_node(parent, index): # the line number where the previous token has ended (plus empty lines) line = loader.line node = Composer.compose_node(loader, parent, index) node.__line__ = line + 1 return node def construct_mapping(node, deep=False): mapping = AnsibleConstructor.construct_mapping(loader, node, deep=deep) if hasattr(node, '__line__'): mapping[LINE_NUMBER_KEY] = node.__line__ else: mapping[LINE_NUMBER_KEY] = mapping._line_number mapping[FILENAME_KEY] = filename return mapping try: kwargs = {} if 'vault_password' in inspect.getfullargspec(AnsibleLoader.__init__).args: kwargs['vault_password'] = DEFAULT_VAULT_PASSWORD loader = AnsibleLoader(data, **kwargs) loader.compose_node = compose_node loader.construct_mapping = construct_mapping data = loader.get_single_data() except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e: raise SystemExit("Failed to parse YAML in %s: %s" % (filename, str(e))) return data def get_first_cmd_arg(task: Dict[str, Any]) -> Any: try: if 'cmd' in task['action']: first_cmd_arg = task['action']['cmd'].split()[0] else: first_cmd_arg = task['action']['__ansible_arguments__'][0] except IndexError: return None return first_cmd_arg def is_playbook(filename: str) -> bool: """ Check if the file is a playbook. Given a filename, it should return true if it looks like a playbook. The function is not supposed to raise exceptions. """ # we assume is a playbook if we loaded a sequence of dictionaries where # at least one of these keys is present: playbooks_keys = { "gather_facts", "hosts", "import_playbook", "post_tasks", "pre_tasks", "roles", "tasks", } # makes it work with Path objects by converting them to strings if not isinstance(filename, str): filename = str(filename) try: f = parse_yaml_from_file(filename) except Exception as e: _logger.warning( "Failed to load %s with %s, assuming is not a playbook.", filename, e ) else: if ( isinstance(f, AnsibleSequence) and hasattr(next(iter(f), {}), 'keys') and playbooks_keys.intersection(next(iter(f), {}).keys()) ): return True return False # pylint: disable=too-many-statements def get_lintables( options: Namespace = Namespace(), args: Optional[List[str]] = None ) -> List[Lintable]: """Detect files and directories that are lintable.""" lintables: List[Lintable] = [] # passing args bypass auto-detection mode if args: for arg in args: lintable = Lintable(arg) if lintable.kind in ("yaml", None): _logger.warning( "Overriding detected file kind '%s' with 'playbook' " "for given positional argument: %s", lintable.kind, arg, ) lintable = Lintable(arg, kind="playbook") lintables.append(lintable) else: for filename in get_yaml_files(options): p = Path(filename) # skip exclusions try: for file_path in options.exclude_paths: if str(p.resolve()).startswith(str(file_path)): raise FileNotFoundError( f'File {file_path} matched exclusion entry: {p}' ) except FileNotFoundError as e: _logger.debug('Ignored %s due to: %s', p, e) continue lintables.append(Lintable(p)) # stage 2: guess roles from current lintables, as there is no unique # file that must be present in any kind of role. 
    _extend_with_roles(lintables)

    return lintables


def _extend_with_roles(lintables: List[Lintable]) -> None:
    """Detect roles among lintables and add them to the list."""
    for lintable in lintables:
        parts = lintable.path.parent.parts
        if 'roles' in parts:
            role = lintable.path
            while role.parent.name != "roles" and role.name:
                role = role.parent
            # Path.exists is a method and must be called; the bare bound
            # method is always truthy
            if role.exists():
                lintable = Lintable(role, kind="role")
                if lintable not in lintables:
                    _logger.debug("Added role: %s", lintable)
                    lintables.append(lintable)


def convert_to_boolean(value: Any) -> bool:
    """Use Ansible to convert something to a boolean."""
    return bool(boolean(value))


def nested_items(
    data: Union[Dict[Any, Any], List[Any]]
) -> Generator[Tuple[Any, Any], None, None]:
    """Iterate a nested data structure, yielding key/value pairs at every depth."""
    if isinstance(data, dict):
        for k, v in data.items():
            yield k, v
            for k, v in nested_items(v):
                yield k, v
    if isinstance(data, list):
        for item in data:
            yield "list-item", item
            for k, v in nested_items(item):
                yield k, v
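

# A minimal usage sketch for nested_items (illustrative only, not part of
# the module above); the sample data is made up for demonstration. A single
# loop over the generator sees keys from every nesting level, plus a
# synthetic "list-item" key for each list element.
if __name__ == "__main__":
    task = {
        "name": "demo",
        "copy": {"src": "a.txt", "dest": "/tmp/a.txt"},
        "tags": ["setup", "files"],
    }
    for key, value in nested_items(task):
        print(key, "->", value)
    # Prints the top-level keys ("name", "copy", "tags"), the nested
    # "src"/"dest" pairs, and one "list-item" entry per element of "tags".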
#      0    1    2    3    4    5    6    7    8    9    10   11   12   13   14   15
tt = ('1', '2', '3', '2', '4', '5', '1', '2', '3', '4', '5', '3', '3', '6', '7', '8')
# expected counts: '1' -> 2, '2' -> 3, '3' -> 4

print(tt)

print(' --- count ---')
# the elements are strings, so count() must be given strings too
print(tt.count('1'))
print(tt.count('2'))
print(tt.count('3'))

print(' --- index ---')
print(tt.index('1'))     # equivalent to tt.index('1', 0)
print(tt.index('1', 1))
# print(tt.index('1', 7))  # raises ValueError: no '1' at index 7 or later

print(' --- in ---')
print('1' in tt)
print('17' in tt)

print(' --- loops ---')
for value in tt:
    print(value)

print(' --- list comprehensions ---')
# works, but a plain for loop is preferred when only the side effect matters
[print(x) for x in tt]

print(' --- other objects ---')
print(f'str.join(): {", ".join(tt)}')
print(f'len(): {len(tt)}')

print(' --- tuple concatenation ---')
print((1, 2) + (3, 4, 5))
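
# An optional extension (not in the original script): collections.Counter
# computes every element's count in a single pass, matching the expected
# counts noted above ('1' -> 2, '2' -> 3, '3' -> 4). The import would
# normally live at the top of the file.
from collections import Counter

print(' --- Counter ---')
print(Counter(tt))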
from pprint import pprint
import socket

import yaml
from netmiko import (
    Netmiko,
    NetmikoTimeoutException,
    NetmikoAuthenticationException,
)


def send_show_command(device, commands):
    result = {}
    # accept a single command as a plain string as well as a list of commands
    if isinstance(commands, str):
        commands = [commands]
    try:
        with Netmiko(**device) as ssh:
            ssh.enable()
            for command in commands:
                output = ssh.send_command(command)
                result[command] = output
        return result
    except NetmikoAuthenticationException as error:
        print(error)
    except (NetmikoTimeoutException, socket.timeout):
        print(f"Timeout when connecting to {device['host']}")


if __name__ == "__main__":
    device = {
        "device_type": "cisco_ios_telnet",
        "host": "192.168.100.1",
        "username": "cisco",
        "password": "cisco",
        "secret": "cisco",
    }
    result = send_show_command(device, ["sh clock", "sh ip int br"])
    pprint(result, width=120)
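
# A hedged sketch (not part of the original script) of how the otherwise
# unused yaml import above could be put to work: load several devices from a
# YAML inventory and run the same commands against each one. The file name
# "devices.yaml" and its structure (a YAML list of Netmiko connection
# dictionaries) are assumptions made for illustration.
def send_command_to_all_devices(inventory_path, commands):
    with open(inventory_path) as f:
        devices = yaml.safe_load(f)
    return [send_show_command(device, commands) for device in devices]

# Example usage, assuming devices.yaml exists:
# pprint(send_command_to_all_devices("devices.yaml", ["sh clock"]), width=120)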