after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
# Bulk Fetch DagRuns with dag_id and execution_date same
# as DagModel.dag_id and DagModel.next_dagrun
# This list is used to verify if the DagRun already exist so that we don't attempt to create
# duplicate dag runs
active_dagruns = (
session.query(DagRun.dag_id, DagRun.execution_date)
.filter(
tuple_(DagRun.dag_id, DagRun.execution_date).in_(
[(dm.dag_id, dm.next_dagrun) for dm in dag_models]
)
)
.all()
)
for dag_model in dag_models:
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception(
"DAG '%s' not found in serialized_dag table", dag_model.dag_id
)
continue
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
# Explicitly check if the DagRun already exists. This is an edge case
# where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
# are not updated.
# We opted to check DagRun existence instead
# of catching an Integrity error and rolling back the session i.e
# we need to run self._update_dag_next_dagruns if the Dag Run already exists or if we
# create a new one. This is so that in the next Scheduling loop we try to create new runs
# instead of falling in a loop of Integrity Error.
if (dag.dag_id, dag_model.next_dagrun) not in active_dagruns:
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
self._update_dag_next_dagruns(dag_models, session)
|
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
for dag_model in dag_models:
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception(
"DAG '%s' not found in serialized_dag table", dag_model.dag_id
)
continue
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
self._update_dag_next_dagruns(dag_models, session)
|
https://github.com/apache/airflow/issues/13685
|
{2021-01-15 09:06:22,636} {{scheduler_job.py:1293}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1276, in _execute_context
self.dialect.do_execute(
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 609, in do_execute
cursor.execute(statement, parameters)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/MySQLdb/cursors.py", line 209, in execute
res = self._query(query)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/MySQLdb/cursors.py", line 315, in _query
db.query(q)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/MySQLdb/connections.py", line 239, in query
_mysql.connection.query(self, query)
MySQLdb._exceptions.IntegrityError: (1062, "Duplicate entry 'huge_demo13499411352-2021-01-15 01:04:00.000000' for key 'dag_run.dag_id'")
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1275, in _execute
self._run_scheduler_loop()
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1377, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1474, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1561, in _create_dag_runs
dag.create_dagrun(
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/utils/session.py", line 62, in wrapper
return func(*args, **kwargs)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/models/dag.py", line 1807, in create_dagrun
session.flush()
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 2540, in flush
self._flush(objects)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 2682, in _flush
transaction.rollback(_capture_exception=True)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.raise_(
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 2642, in _flush
flush_context.execute()
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/orm/unitofwork.py", line 586, in execute
persistence.save_obj(
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/orm/persistence.py", line 239, in save_obj
_emit_insert_statements(
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/orm/persistence.py", line 1135, in _emit_insert_statements
result = cached_connections[connection].execute(
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1011, in execute
return meth(self, multiparams, params)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1124, in _execute_clauseelement
ret = self._execute_context(
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1316, in _execute_context
self._handle_dbapi_exception(
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1510, in _handle_dbapi_exception
util.raise_(
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1276, in _execute_context
self.dialect.do_execute(
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 609, in do_execute
cursor.execute(statement, parameters)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/MySQLdb/cursors.py", line 209, in execute
res = self._query(query)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/MySQLdb/cursors.py", line 315, in _query
db.query(q)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/MySQLdb/connections.py", line 239, in query
_mysql.connection.query(self, query)
sqlalchemy.exc.IntegrityError: (MySQLdb._exceptions.IntegrityError) (1062, "Duplicate entry 'huge_demo13499411352-2021-01-15 01:04:00.000000' for key 'dag_run.dag_id'")
[SQL: INSERT INTO dag_run (dag_id, execution_date, start_date, end_date, state, run_id, creating_job_id, external_trigger, run_type, conf, last_scheduling_decision, dag_hash) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)]
[parameters: ('huge_demo13499411352', datetime.datetime(2021, 1, 15, 1, 4), datetime.datetime(2021, 1, 15, 1, 6, 22, 629433), None, 'running', 'scheduled__2021-01-15T01:04:00+00:00', 71466, 0, <DagRunType.SCHEDULED: 'scheduled'>, b'\x80\x05}\x94.', None, '60078c379cdeecb9bc8844eed5aa9745')]
(Background on this error at: http://sqlalche.me/e/13/gkpj)
{2021-01-15 09:06:23,648} {{process_utils.py:95}} INFO - Sending Signals.SIGTERM to GPID 66351
{2021-01-15 09:06:23,781} {{process_utils.py:61}} INFO - Process psutil.Process(pid=66351, status='terminated') (66351) terminated with exit code 0
{2021-01-15 09:06:23,781} {{scheduler_job.py:1296}} INFO - Exited execute loop
|
MySQLdb._exceptions.IntegrityError
|
def upgrade(): # noqa: D103
# We previously had a KnownEvent's table, but we deleted the table without
# a down migration to remove it (so we didn't delete anyone's data if they
# were happing to use the feature.
#
# But before we can delete the users table we need to drop the FK
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
tables = inspector.get_table_names()
if "known_event" in tables:
for fkey in inspector.get_foreign_keys(
table_name="known_event", referred_table="users"
):
if fkey["name"]:
with op.batch_alter_table(table_name="known_event") as bop:
bop.drop_constraint(fkey["name"], type_="foreignkey")
if "chart" in tables:
op.drop_table(
"chart",
)
if "users" in tables:
op.drop_table("users")
|
def upgrade(): # noqa: D103
# We previously had a KnownEvent's table, but we deleted the table without
# a down migration to remove it (so we didn't delete anyone's data if they
# were happing to use the feature.
#
# But before we can delete the users table we need to drop the FK
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
tables = inspector.get_table_names()
if "known_event" in tables:
for fkey in inspector.get_foreign_keys(
table_name="known_event", referred_table="users"
):
op.drop_constraint(fkey["name"], "known_event", type_="foreignkey")
if "chart" in tables:
op.drop_table(
"chart",
)
if "users" in tables:
op.drop_table("users")
|
https://github.com/apache/airflow/issues/13877
|
[2021-01-24 08:38:42,015] {db.py:678} INFO - Creating tables
INFO [alembic.runtime.migration] Context impl SQLiteImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade 03afc6b6f902 -> cf5dc11e79ad, drop_user_and_chart
Traceback (most recent call last):
File "/Users/vijayantsoni/.virtualenvs/airflow/bin/airflow", line 11, in <module>
sys.exit(main())
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/airflow/cli/commands/db_command.py", line 31, in initdb
db.initdb()
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/airflow/utils/db.py", line 549, in initdb
upgradedb()
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/airflow/utils/db.py", line 688, in upgradedb
command.upgrade(config, 'heads')
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/alembic/command.py", line 294, in upgrade
script.run_env()
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/alembic/script/base.py", line 481, in run_env
util.load_python_file(self.dir, "env.py")
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/alembic/util/pyfiles.py", line 97, in load_python_file
module = load_module_py(module_id, path)
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/alembic/util/compat.py", line 182, in load_module_py
spec.loader.exec_module(module)
File "<frozen importlib._bootstrap_external>", line 783, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/airflow/migrations/env.py", line 108, in <module>
run_migrations_online()
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/airflow/migrations/env.py", line 102, in run_migrations_online
context.run_migrations()
File "<string>", line 8, in run_migrations
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/alembic/runtime/environment.py", line 813, in run_migrations
self.get_context().run_migrations(**kw)
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/alembic/runtime/migration.py", line 560, in run_migrations
step.migration_fn(**kw)
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/airflow/migrations/versions/cf5dc11e79ad_drop_user_and_chart.py", line 49, in upgrade
op.drop_constraint('known_event_user_id_fkey', 'known_event')
File "<string>", line 8, in drop_constraint
File "<string>", line 3, in drop_constraint
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/alembic/operations/ops.py", line 148, in drop_constraint
return operations.invoke(op)
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/alembic/operations/base.py", line 354, in invoke
return fn(self, operation)
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/alembic/operations/toimpl.py", line 160, in drop_constraint
operations.impl.drop_constraint(
File "/Users/vijayantsoni/.virtualenvs/airflow/lib/python3.8/site-packages/alembic/ddl/sqlite.py", line 52, in drop_constraint
raise NotImplementedError(
NotImplementedError: No support for ALTER of constraints in SQLite dialectPlease refer to the batch mode feature which allows for SQLite migrations using a copy-and-move strategy.
|
NotImplementedError
|
def __init__(
self,
*,
trigger_dag_id: str,
conf: Optional[Dict] = None,
execution_date: Optional[Union[str, datetime.datetime]] = None,
reset_dag_run: bool = False,
wait_for_completion: bool = False,
poke_interval: int = 60,
allowed_states: Optional[List] = None,
failed_states: Optional[List] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_dag_id = trigger_dag_id
self.conf = conf
self.reset_dag_run = reset_dag_run
self.wait_for_completion = wait_for_completion
self.poke_interval = poke_interval
self.allowed_states = allowed_states or [State.SUCCESS]
self.failed_states = failed_states or [State.FAILED]
if not isinstance(execution_date, (str, datetime.datetime, type(None))):
raise TypeError(
"Expected str or datetime.datetime type for execution_date.Got {}".format(
type(execution_date)
)
)
self.execution_date: Optional[datetime.datetime] = execution_date # type: ignore
try:
json.dumps(self.conf)
except TypeError:
raise AirflowException("conf parameter should be JSON Serializable")
|
def __init__(
self,
*,
trigger_dag_id: str,
conf: Optional[Dict] = None,
execution_date: Optional[Union[str, datetime.datetime]] = None,
reset_dag_run: bool = False,
wait_for_completion: bool = False,
poke_interval: int = 60,
allowed_states: Optional[List] = None,
failed_states: Optional[List] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_dag_id = trigger_dag_id
self.conf = conf
self.reset_dag_run = reset_dag_run
self.wait_for_completion = wait_for_completion
self.poke_interval = poke_interval
self.allowed_states = allowed_states or [State.SUCCESS]
self.failed_states = failed_states or [State.FAILED]
if not isinstance(execution_date, (str, datetime.datetime, type(None))):
raise TypeError(
"Expected str or datetime.datetime type for execution_date.Got {}".format(
type(execution_date)
)
)
self.execution_date: Optional[datetime.datetime] = execution_date # type: ignore
|
https://github.com/apache/airflow/issues/13414
|
Ooops!
Something bad has happened.
Please consider letting us know by creating a bug report using GitHub.
Python version: 3.6.9
Airflow version: 2.0.0
Node: henry-Inspiron-5566
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/home/henry/Envs2/airflow3/lib/python3.6/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/home/henry/Envs2/airflow3/lib/python3.6/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/henry/Envs2/airflow3/lib/python3.6/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/henry/Envs2/airflow3/lib/python3.6/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/henry/Envs2/airflow3/lib/python3.6/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/henry/Envs2/airflow3/lib/python3.6/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/henry/Envs2/airflow3/lib/python3.6/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/home/henry/Envs2/airflow3/lib/python3.6/site-packages/airflow/www/decorators.py", line 97, in view_func
return f(*args, **kwargs)
File "/home/henry/Envs2/airflow3/lib/python3.6/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/home/henry/Envs2/airflow3/lib/python3.6/site-packages/airflow/www/views.py", line 1997, in tree
data = htmlsafe_json_dumps(data, separators=(',', ':'))
File "/home/henry/Envs2/airflow3/lib/python3.6/site-packages/jinja2/utils.py", line 614, in htmlsafe_json_dumps
dumper(obj, **kwargs)
File "/usr/lib/python3.6/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File "/usr/lib/python3.6/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib/python3.6/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/usr/lib/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'datetime' is not JSON serializable
|
TypeError
|
def get_dag(self, dag_id, session: Session = None):
"""
Gets the DAG out of the dictionary, and refreshes it if expired
:param dag_id: DAG Id
:type dag_id: str
"""
# Avoid circular import
from airflow.models.dag import DagModel
if self.read_dags_from_db:
# Import here so that serialized dag is only imported when serialization is enabled
from airflow.models.serialized_dag import SerializedDagModel
if dag_id not in self.dags:
# Load from DB if not (yet) in the bag
self._add_dag_from_db(dag_id=dag_id, session=session)
return self.dags.get(dag_id)
# If DAG is in the DagBag, check the following
# 1. if time has come to check if DAG is updated (controlled by min_serialized_dag_fetch_secs)
# 2. check the last_updated column in SerializedDag table to see if Serialized DAG is updated
# 3. if (2) is yes, fetch the Serialized DAG.
min_serialized_dag_fetch_secs = timedelta(
seconds=settings.MIN_SERIALIZED_DAG_FETCH_INTERVAL
)
if (
dag_id in self.dags_last_fetched
and timezone.utcnow()
> self.dags_last_fetched[dag_id] + min_serialized_dag_fetch_secs
):
sd_last_updated_datetime = SerializedDagModel.get_last_updated_datetime(
dag_id=dag_id,
session=session,
)
if (
sd_last_updated_datetime
and sd_last_updated_datetime > self.dags_last_fetched[dag_id]
):
self._add_dag_from_db(dag_id=dag_id, session=session)
return self.dags.get(dag_id)
# If asking for a known subdag, we want to refresh the parent
dag = None
root_dag_id = dag_id
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.is_subdag:
root_dag_id = dag.parent_dag.dag_id # type: ignore
# If DAG Model is absent, we can't check last_expired property. Is the DAG not yet synchronized?
orm_dag = DagModel.get_current(root_dag_id, session=session)
if not orm_dag:
return self.dags.get(dag_id)
# If the dag corresponding to root_dag_id is absent or expired
is_missing = root_dag_id not in self.dags
is_expired = orm_dag.last_expired and dag and dag.last_loaded < orm_dag.last_expired
if is_missing or is_expired:
# Reprocess source file
found_dags = self.process_file(
filepath=correct_maybe_zipped(orm_dag.fileloc), only_if_updated=False
)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
return self.dags[dag_id]
elif dag_id in self.dags:
del self.dags[dag_id]
return self.dags.get(dag_id)
|
def get_dag(self, dag_id, session: Session = None):
"""
Gets the DAG out of the dictionary, and refreshes it if expired
:param dag_id: DAG Id
:type dag_id: str
"""
# Avoid circular import
from airflow.models.dag import DagModel
if self.read_dags_from_db:
# Import here so that serialized dag is only imported when serialization is enabled
from airflow.models.serialized_dag import SerializedDagModel
if dag_id not in self.dags:
# Load from DB if not (yet) in the bag
self._add_dag_from_db(dag_id=dag_id, session=session)
return self.dags.get(dag_id)
# If DAG is in the DagBag, check the following
# 1. if time has come to check if DAG is updated (controlled by min_serialized_dag_fetch_secs)
# 2. check the last_updated column in SerializedDag table to see if Serialized DAG is updated
# 3. if (2) is yes, fetch the Serialized DAG.
min_serialized_dag_fetch_secs = timedelta(
seconds=settings.MIN_SERIALIZED_DAG_FETCH_INTERVAL
)
if (
dag_id in self.dags_last_fetched
and timezone.utcnow()
> self.dags_last_fetched[dag_id] + min_serialized_dag_fetch_secs
):
sd_last_updated_datetime = SerializedDagModel.get_last_updated_datetime(
dag_id=dag_id,
session=session,
)
if sd_last_updated_datetime > self.dags_last_fetched[dag_id]:
self._add_dag_from_db(dag_id=dag_id, session=session)
return self.dags.get(dag_id)
# If asking for a known subdag, we want to refresh the parent
dag = None
root_dag_id = dag_id
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.is_subdag:
root_dag_id = dag.parent_dag.dag_id # type: ignore
# If DAG Model is absent, we can't check last_expired property. Is the DAG not yet synchronized?
orm_dag = DagModel.get_current(root_dag_id, session=session)
if not orm_dag:
return self.dags.get(dag_id)
# If the dag corresponding to root_dag_id is absent or expired
is_missing = root_dag_id not in self.dags
is_expired = orm_dag.last_expired and dag and dag.last_loaded < orm_dag.last_expired
if is_missing or is_expired:
# Reprocess source file
found_dags = self.process_file(
filepath=correct_maybe_zipped(orm_dag.fileloc), only_if_updated=False
)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
return self.dags[dag_id]
elif dag_id in self.dags:
del self.dags[dag_id]
return self.dags.get(dag_id)
|
https://github.com/apache/airflow/issues/13667
|
2021-01-14 14:07:44,429} {{scheduler_job.py:1754}} INFO - Resetting orphaned tasks for active dag runs
{2021-01-14 14:08:14,470} {{scheduler_job.py:1754}} INFO - Resetting orphaned tasks for active dag runs
{2021-01-14 14:08:16,968} {{scheduler_job.py:1293}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1275, in _execute
self._run_scheduler_loop()
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1377, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1516, in _do_scheduling
self._schedule_dag_run(dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1629, in _schedule_dag_run
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/utils/session.py", line 62, in wrapper
return func(*args, **kwargs)
File "/home/app/.pyenv/versions/3.8.1/envs/airflow-py381/lib/python3.8/site-packages/airflow/models/dagbag.py", line 187, in get_dag
if sd_last_updated_datetime > self.dags_last_fetched[dag_id]:
TypeError: '>' not supported between instances of 'NoneType' and 'datetime.datetime'
{2021-01-14 14:08:17,975} {{process_utils.py:95}} INFO - Sending Signals.SIGTERM to GPID 53178
{2021-01-14 14:08:18,212} {{process_utils.py:61}} INFO - Process psutil.Process(pid=58676, status='terminated') (58676) terminated with exit code None
{2021-01-14 14:08:18,295} {{process_utils.py:201}} INFO - Waiting up to 5 seconds for processes to exit...
{2021-01-14 14:08:18,345} {{process_utils.py:61}} INFO - Process psutil.Process(pid=53178, status='terminated') (53178) terminated with exit code 0
{2021-01-14 14:08:18,345} {{process_utils.py:61}} INFO - Process psutil.Process(pid=58677, status='terminated') (58677) terminated with exit code None
{2021-01-14 14:08:18,346} {{process_utils.py:61}} INFO - Process psutil.Process(pid=58678, status='terminated') (58678) terminated with exit code None
{2021-01-14 14:08:18,346} {{process_utils.py:61}} INFO - Process psutil.Process(pid=58708, status='terminated') (58708) terminated with exit code None
{2021-01-14 14:08:18,346} {{scheduler_job.py:1296}} INFO - Exited execute loop
|
TypeError
|
def shell(args):
"""Run a shell that allows to access metadata database"""
url = settings.engine.url
print("DB: " + repr(url))
if url.get_backend_name() == "mysql":
with NamedTemporaryFile(suffix="my.cnf") as f:
content = textwrap.dedent(
f"""
[client]
host = {url.host}
user = {url.username}
password = {url.password or ""}
port = {url.port or "3306"}
database = {url.database}
"""
).strip()
f.write(content.encode())
f.flush()
execute_interactive(["mysql", f"--defaults-extra-file={f.name}"])
elif url.get_backend_name() == "sqlite":
execute_interactive(["sqlite3", url.database])
elif url.get_backend_name() == "postgresql":
env = os.environ.copy()
env["PGHOST"] = url.host or ""
env["PGPORT"] = str(url.port or "5432")
env["PGUSER"] = url.username or ""
# PostgreSQL does not allow the use of PGPASSFILE if the current user is root.
env["PGPASSWORD"] = url.password or ""
env["PGDATABASE"] = url.database
execute_interactive(["psql"], env=env)
else:
raise AirflowException(f"Unknown driver: {url.drivername}")
|
def shell(args):
"""Run a shell that allows to access metadata database"""
url = settings.engine.url
print("DB: " + repr(url))
if url.get_backend_name() == "mysql":
with NamedTemporaryFile(suffix="my.cnf") as f:
content = textwrap.dedent(
f"""
[client]
host = {url.host}
user = {url.username}
password = {url.password or ""}
port = {url.port or "3306"}
database = {url.database}
"""
).strip()
f.write(content.encode())
f.flush()
execute_interactive(["mysql", f"--defaults-extra-file={f.name}"])
elif url.get_backend_name() == "sqlite":
execute_interactive(["sqlite3", url.database]).wait()
elif url.get_backend_name() == "postgresql":
env = os.environ.copy()
env["PGHOST"] = url.host or ""
env["PGPORT"] = str(url.port or "5432")
env["PGUSER"] = url.username or ""
# PostgreSQL does not allow the use of PGPASSFILE if the current user is root.
env["PGPASSWORD"] = url.password or ""
env["PGDATABASE"] = url.database
execute_interactive(["psql"], env=env)
else:
raise AirflowException(f"Unknown driver: {url.drivername}")
|
https://github.com/apache/airflow/issues/12806
|
[2020-12-04 07:31:28,506] {process_utils.py:149} INFO - Executing cmd: sqlite3 /home/airflow/airflow/airflow.db
SQLite version 3.31.1 2020-01-27 19:55:54
Enter ".help" for usage hints.
sqlite> ;
sqlite> .quit
Traceback (most recent call last):
File "/home/airflow/sandbox/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/sandbox/lib/python3.8/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/home/airflow/sandbox/lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 50, in command
return func(*args, **kwargs)
File "/home/airflow/sandbox/lib/python3.8/site-packages/airflow/utils/cli.py", line 86, in wrapper
return f(*args, **kwargs)
File "/home/airflow/sandbox/lib/python3.8/site-packages/airflow/cli/commands/db_command.py", line 78, in shell
execute_interactive(["sqlite3", url.database]).wait()
AttributeError: 'NoneType' object has no attribute 'wait'
|
AttributeError
|
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take places. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
The reason we don't select all dagruns at once because the rows are selected with row locks, meaning
that only one scheduler can "process them", even it it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks.) DAGs
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
query = DagModel.dags_needing_dagruns(session)
self._create_dag_runs(query.all(), session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
dag_runs = DagRun.next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer term fix would be to have `clear` do this, and put DagRuns
# in to the queued state, then take DRs out of queued before creating
# any new ones
# Build up a set of execution_dates that are "active" for a given
# dag_id -- only tasks from those runs will be scheduled.
active_runs_by_dag_id = defaultdict(set)
query = (
session.query(
TI.dag_id,
TI.execution_date,
)
.filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished) + [State.REMOVED]),
)
.group_by(TI.dag_id, TI.execution_date)
)
for dag_id, execution_date in query:
active_runs_by_dag_id[dag_id].add(execution_date)
for dag_run in dag_runs:
# Use try_except to not stop the Scheduler when a Serialized DAG is not found
# This takes care of Dynamic DAGs especially
# SerializedDagNotFound should not happen here in the same loop because the DagRun would
# not be created in self._create_dag_runs if Serialized DAG does not exist
# But this would take care of the scenario when the Scheduler is restarted after DagRun is
# created and the DAG is deleted / renamed
try:
self._schedule_dag_run(
dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session
)
except SerializedDagNotFound:
self.log.exception(
"DAG '%s' not found in serialized_dag table", dag_run.dag_id
)
continue
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer("scheduler.critical_section_duration")
timer.start()
# Find anything TIs in state SCHEDULED, try to QUEUE it (send it to the executor)
num_queued_tis = self._critical_section_execute_task_instances(
session=session
)
# Make sure we only sent this metric if we obtained the lock, otherwise we'll skew the
# metric, way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr("scheduler.critical_section_busy")
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
|
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take places. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
The reason we don't select all dagruns at once because the rows are selected with row locks, meaning
that only one scheduler can "process them", even it it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks.) DAGs
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
query = DagModel.dags_needing_dagruns(session)
self._create_dag_runs(query.all(), session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
dag_runs = DagRun.next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer term fix would be to have `clear` do this, and put DagRuns
# in to the queued state, then take DRs out of queued before creating
# any new ones
# Build up a set of execution_dates that are "active" for a given
# dag_id -- only tasks from those runs will be scheduled.
active_runs_by_dag_id = defaultdict(set)
query = (
session.query(
TI.dag_id,
TI.execution_date,
)
.filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished) + [State.REMOVED]),
)
.group_by(TI.dag_id, TI.execution_date)
)
for dag_id, execution_date in query:
active_runs_by_dag_id[dag_id].add(execution_date)
for dag_run in dag_runs:
self._schedule_dag_run(
dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session
)
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer("scheduler.critical_section_duration")
timer.start()
# Find anything TIs in state SCHEDULED, try to QUEUE it (send it to the executor)
num_queued_tis = self._critical_section_execute_task_instances(
session=session
)
# Make sure we only sent this metric if we obtained the lock, otherwise we'll skew the
# metric, way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr("scheduler.critical_section_busy")
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
|
https://github.com/apache/airflow/issues/13504
|
[2021-01-06 10:09:38,742] {scheduler_job.py:1293} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1275, in _execute
self._run_scheduler_loop()
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1377, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1474, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1557, in _create_dag_runs
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/utils/session.py", line 62, in wrapper
return func(*args, **kwargs)
File "/global/packages/python/lib/python3.7/site-packages/airflow/models/dagbag.py", line 171, in get_dag
self._add_dag_from_db(dag_id=dag_id, session=session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/models/dagbag.py", line 227, in _add_dag_from_db
raise SerializedDagNotFound(f"DAG '{dag_id}' not found in serialized_dag table")
airflow.exceptions.SerializedDagNotFound: DAG 'dynamic_dag_1' not found in serialized_dag table
|
airflow.exceptions.SerializedDagNotFound
|
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
for dag_model in dag_models:
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception(
"DAG '%s' not found in serialized_dag table", dag_model.dag_id
)
continue
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
self._update_dag_next_dagruns(dag_models, session)
|
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
self._update_dag_next_dagruns(dag_models, session)
|
https://github.com/apache/airflow/issues/13504
|
[2021-01-06 10:09:38,742] {scheduler_job.py:1293} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1275, in _execute
self._run_scheduler_loop()
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1377, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1474, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1557, in _create_dag_runs
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/utils/session.py", line 62, in wrapper
return func(*args, **kwargs)
File "/global/packages/python/lib/python3.7/site-packages/airflow/models/dagbag.py", line 171, in get_dag
self._add_dag_from_db(dag_id=dag_id, session=session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/models/dagbag.py", line 227, in _add_dag_from_db
raise SerializedDagNotFound(f"DAG '{dag_id}' not found in serialized_dag table")
airflow.exceptions.SerializedDagNotFound: DAG 'dynamic_dag_1' not found in serialized_dag table
|
airflow.exceptions.SerializedDagNotFound
|
def _update_dag_next_dagruns(
self, dag_models: Iterable[DagModel], session: Session
) -> None:
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
# Check max_active_runs, to see if we are _now_ at the limit for any of
# these dag? (we've just created a DagRun for them after all)
active_runs_of_dags = dict(
session.query(DagRun.dag_id, func.count("*"))
.filter(
DagRun.dag_id.in_([o.dag_id for o in dag_models]),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
)
.group_by(DagRun.dag_id)
.all()
)
for dag_model in dag_models:
# Get the DAG in a try_except to not stop the Scheduler when a Serialized DAG is not found
# This takes care of Dynamic DAGs especially
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception(
"DAG '%s' not found in serialized_dag table", dag_model.dag_id
)
continue
active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = (
dag.next_dagrun_info(dag_model.next_dagrun)
)
|
def _update_dag_next_dagruns(
self, dag_models: Iterable[DagModel], session: Session
) -> None:
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
# Check max_active_runs, to see if we are _now_ at the limit for any of
# these dag? (we've just created a DagRun for them after all)
active_runs_of_dags = dict(
session.query(DagRun.dag_id, func.count("*"))
.filter(
DagRun.dag_id.in_([o.dag_id for o in dag_models]),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
)
.group_by(DagRun.dag_id)
.all()
)
for dag_model in dag_models:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = (
dag.next_dagrun_info(dag_model.next_dagrun)
)
|
https://github.com/apache/airflow/issues/13504
|
[2021-01-06 10:09:38,742] {scheduler_job.py:1293} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1275, in _execute
self._run_scheduler_loop()
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1377, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1474, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1557, in _create_dag_runs
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/utils/session.py", line 62, in wrapper
return func(*args, **kwargs)
File "/global/packages/python/lib/python3.7/site-packages/airflow/models/dagbag.py", line 171, in get_dag
self._add_dag_from_db(dag_id=dag_id, session=session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/models/dagbag.py", line 227, in _add_dag_from_db
raise SerializedDagNotFound(f"DAG '{dag_id}' not found in serialized_dag table")
airflow.exceptions.SerializedDagNotFound: DAG 'dynamic_dag_1' not found in serialized_dag table
|
airflow.exceptions.SerializedDagNotFound
|
def sync_to_db(self, session: Optional[Session] = None):
"""Save attributes about list of DAG to the DB."""
# To avoid circular import - airflow.models.dagbag -> airflow.models.dag -> airflow.models.dagbag
from airflow.models.dag import DAG
from airflow.models.serialized_dag import SerializedDagModel
def _serialze_dag_capturing_errors(dag, session):
"""
Try to serialize the dag to the DB, but make a note of any errors.
We can't place them directly in import_errors, as this may be retried, and work the next time
"""
if dag.is_subdag:
return []
try:
# We cant use bulk_write_to_db as we want to capture each error individually
SerializedDagModel.write_dag(
dag,
min_update_interval=settings.MIN_SERIALIZED_DAG_UPDATE_INTERVAL,
session=session,
)
return []
except OperationalError:
raise
except Exception: # pylint: disable=broad-except
return [
(
dag.fileloc,
traceback.format_exc(
limit=-self.dagbag_import_error_traceback_depth
),
)
]
# Retry 'DAG.bulk_write_to_db' & 'SerializedDagModel.bulk_sync_to_db' in case
# of any Operational Errors
# In case of failures, provide_session handles rollback
for attempt in tenacity.Retrying(
retry=tenacity.retry_if_exception_type(exception_types=OperationalError),
wait=tenacity.wait_random_exponential(multiplier=0.5, max=5),
stop=tenacity.stop_after_attempt(settings.MAX_DB_RETRIES),
before_sleep=tenacity.before_sleep_log(self.log, logging.DEBUG),
reraise=True,
):
with attempt:
serialize_errors = []
self.log.debug(
"Running dagbag.sync_to_db with retries. Try %d of %d",
attempt.retry_state.attempt_number,
settings.MAX_DB_RETRIES,
)
self.log.debug("Calling the DAG.bulk_sync_to_db method")
try:
# Write Serialized DAGs to DB, capturing errors
for dag in self.dags.values():
serialize_errors.extend(
_serialze_dag_capturing_errors(dag, session)
)
DAG.bulk_write_to_db(self.dags.values(), session=session)
except OperationalError:
session.rollback()
raise
# Only now we are "complete" do we update import_errors - don't want to record errors from
# previous failed attempts
self.import_errors.update(dict(serialize_errors))
|
def sync_to_db(self, session: Optional[Session] = None):
"""Save attributes about list of DAG to the DB."""
# To avoid circular import - airflow.models.dagbag -> airflow.models.dag -> airflow.models.dagbag
from airflow.models.dag import DAG
from airflow.models.serialized_dag import SerializedDagModel
def _serialze_dag_capturing_errors(dag, session):
"""
Try to serialize the dag to the DB, but make a note of any errors.
We can't place them directly in import_errors, as this may be retried, and work the next time
"""
if dag.is_subdag:
return []
try:
# We cant use bulk_write_to_db as we want to capture each error individually
SerializedDagModel.write_dag(
dag,
min_update_interval=settings.MIN_SERIALIZED_DAG_UPDATE_INTERVAL,
session=session,
)
return []
except OperationalError:
raise
except Exception: # pylint: disable=broad-except
return [
(
dag.fileloc,
traceback.format_exc(
limit=-self.dagbag_import_error_traceback_depth
),
)
]
# Retry 'DAG.bulk_write_to_db' & 'SerializedDagModel.bulk_sync_to_db' in case
# of any Operational Errors
# In case of failures, provide_session handles rollback
for attempt in tenacity.Retrying(
retry=tenacity.retry_if_exception_type(exception_types=OperationalError),
wait=tenacity.wait_random_exponential(multiplier=0.5, max=5),
stop=tenacity.stop_after_attempt(settings.MAX_DB_RETRIES),
before_sleep=tenacity.before_sleep_log(self.log, logging.DEBUG),
reraise=True,
):
with attempt:
serialize_errors = []
self.log.debug(
"Running dagbag.sync_to_db with retries. Try %d of %d",
attempt.retry_state.attempt_number,
settings.MAX_DB_RETRIES,
)
self.log.debug("Calling the DAG.bulk_sync_to_db method")
try:
DAG.bulk_write_to_db(self.dags.values(), session=session)
# Write Serialized DAGs to DB, capturing errors
for dag in self.dags.values():
serialize_errors.extend(
_serialze_dag_capturing_errors(dag, session)
)
except OperationalError:
session.rollback()
raise
# Only now we are "complete" do we update import_errors - don't want to record errors from
# previous failed attempts
self.import_errors.update(dict(serialize_errors))
|
https://github.com/apache/airflow/issues/13504
|
[2021-01-06 10:09:38,742] {scheduler_job.py:1293} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1275, in _execute
self._run_scheduler_loop()
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1377, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1474, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1557, in _create_dag_runs
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/utils/session.py", line 62, in wrapper
return func(*args, **kwargs)
File "/global/packages/python/lib/python3.7/site-packages/airflow/models/dagbag.py", line 171, in get_dag
self._add_dag_from_db(dag_id=dag_id, session=session)
File "/global/packages/python/lib/python3.7/site-packages/airflow/models/dagbag.py", line 227, in _add_dag_from_db
raise SerializedDagNotFound(f"DAG '{dag_id}' not found in serialized_dag table")
airflow.exceptions.SerializedDagNotFound: DAG 'dynamic_dag_1' not found in serialized_dag table
|
airflow.exceptions.SerializedDagNotFound
|
def _check_file(self, file_path):
problems = []
class_name_to_check = self.MACRO_PLUGIN_CLASS.split(".")[-1]
with open(file_path, "r") as file_pointer:
try:
for line_number, line in enumerate(file_pointer, 1):
if class_name_to_check in line:
problems.append(self._change_info(file_path, line_number))
except UnicodeDecodeError:
problems.append("Unable to read python file {}".format(file_path))
return problems
|
def _check_file(self, file_path):
problems = []
class_name_to_check = self.MACRO_PLUGIN_CLASS.split(".")[-1]
with open(file_path, "r") as file_pointer:
for line_number, line in enumerate(file_pointer, 1):
if class_name_to_check in line:
problems.append(self._change_info(file_path, line_number))
return problems
|
https://github.com/apache/airflow/issues/13349
|
========================================================================================================================================================== STATUS ==========================================================================================================================================================
Check for latest versions of apache-airflow and checker...........................................................................................................................................................................................................................................................SUCCESS
Traceback (most recent call last):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/bin/airflow", line 37, in <module>
args.func(args)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 88, in run
all_problems = check_upgrade(formatter, rules)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 37, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/problem.py", line 44, in from_rule
result = rule.check()
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 52, in check
problems.extend(self._check_file(file_path))
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 42, in _check_file
for line_number, line in enumerate(file_pointer, 1):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x82 in position 16: invalid start byte
|
UnicodeDecodeError
|
def check(self):
dag_folder = conf.get("core", "dags_folder")
file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)
problems = []
for file_path in file_paths:
if not file_path.endswith(".py"):
continue
problems.extend(self._check_file(file_path))
return problems
|
def check(self):
dag_folder = conf.get("core", "dags_folder")
file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)
problems = []
for file_path in file_paths:
problems.extend(self._check_file(file_path))
return problems
|
https://github.com/apache/airflow/issues/13349
|
========================================================================================================================================================== STATUS ==========================================================================================================================================================
Check for latest versions of apache-airflow and checker...........................................................................................................................................................................................................................................................SUCCESS
Traceback (most recent call last):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/bin/airflow", line 37, in <module>
args.func(args)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 88, in run
all_problems = check_upgrade(formatter, rules)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 37, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/problem.py", line 44, in from_rule
result = rule.check()
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 52, in check
problems.extend(self._check_file(file_path))
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 42, in _check_file
for line_number, line in enumerate(file_pointer, 1):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x82 in position 16: invalid start byte
|
UnicodeDecodeError
|
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
# Callback has a higher priority over DAG Run scheduling
if request.full_filepath in self._file_path_queue:
# Remove file paths matching request.full_filepath from self._file_path_queue
# Since we are already going to use that filepath to run callback,
# there is no need to have same file path again in the queue
self._file_path_queue = [
file_path
for file_path in self._file_path_queue
if file_path != request.full_filepath
]
self._file_path_queue.insert(0, request.full_filepath)
|
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
# Callback has a higher priority over DAG Run scheduling
if request.full_filepath in self._file_path_queue:
self._file_path_queue.remove(request.full_filepath)
self._file_path_queue.insert(0, request.full_filepath)
|
https://github.com/apache/airflow/issues/13047
|
[2020-12-13 19:35:33,752] {dagbag.py:440} INFO - Filling up the DagBag from /usr/local/lib/python3.8/site-packages/airflow/example_dags/example_bash_operator.py
Running <TaskInstance: example_bash_operator.run_after_loop 2020-12-13T19:35:30.648020+00:00 [queued]> on host 6611da4b1a27
[2020-12-13 19:35:34,517] {dagrun.py:444} INFO - Marking run <DagRun example_bash_operator @ 2020-12-13 19:35:30.648020+00:00: manual__2020-12-13T19:35:30.648020+00:00, externally triggered: True> successful
[2020-12-13 19:35:34,523] {scheduler_job.py:1193} INFO - Executor reports execution of example_bash_operator.run_after_loop execution_date=2020-12-13 19:35:30.648020+00:00 exited with status success for try_number 1
Process ForkProcess-34:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/local/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/utils/dag_processing.py", line 365, in _run_processor_manager
processor_manager.start()
File "/usr/local/lib/python3.8/site-packages/airflow/utils/dag_processing.py", line 596, in start
return self._run_parsing_loop()
File "/usr/local/lib/python3.8/site-packages/airflow/utils/dag_processing.py", line 659, in _run_parsing_loop
self._processors.pop(processor.file_path)
KeyError: '/usr/local/lib/python3.8/site-packages/airflow/example_dags/example_bash_operator.py'
[2020-12-13 19:35:35,589] {dag_processing.py:396} WARNING - DagFileProcessorManager (PID=1029759) exited with exit code 1 - re-launching
|
KeyError
|
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
# Stop creating duplicate processor i.e. processor with the same filepath
if file_path in self._processors.keys():
continue
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._processor_factory(
file_path, callback_to_execute_for_file, self._dag_ids, self._pickle_dags
)
del self._callback_to_execute[file_path]
Stats.incr("dag_processing.processes")
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid,
file_path,
)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
|
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._processor_factory(
file_path, callback_to_execute_for_file, self._dag_ids, self._pickle_dags
)
del self._callback_to_execute[file_path]
Stats.incr("dag_processing.processes")
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid,
file_path,
)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
|
https://github.com/apache/airflow/issues/13047
|
[2020-12-13 19:35:33,752] {dagbag.py:440} INFO - Filling up the DagBag from /usr/local/lib/python3.8/site-packages/airflow/example_dags/example_bash_operator.py
Running <TaskInstance: example_bash_operator.run_after_loop 2020-12-13T19:35:30.648020+00:00 [queued]> on host 6611da4b1a27
[2020-12-13 19:35:34,517] {dagrun.py:444} INFO - Marking run <DagRun example_bash_operator @ 2020-12-13 19:35:30.648020+00:00: manual__2020-12-13T19:35:30.648020+00:00, externally triggered: True> successful
[2020-12-13 19:35:34,523] {scheduler_job.py:1193} INFO - Executor reports execution of example_bash_operator.run_after_loop execution_date=2020-12-13 19:35:30.648020+00:00 exited with status success for try_number 1
Process ForkProcess-34:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/local/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/utils/dag_processing.py", line 365, in _run_processor_manager
processor_manager.start()
File "/usr/local/lib/python3.8/site-packages/airflow/utils/dag_processing.py", line 596, in start
return self._run_parsing_loop()
File "/usr/local/lib/python3.8/site-packages/airflow/utils/dag_processing.py", line 659, in _run_parsing_loop
self._processors.pop(processor.file_path)
KeyError: '/usr/local/lib/python3.8/site-packages/airflow/example_dags/example_bash_operator.py'
[2020-12-13 19:35:35,589] {dag_processing.py:396} WARNING - DagFileProcessorManager (PID=1029759) exited with exit code 1 - re-launching
|
KeyError
|
def dag_link(attr):
"""Generates a URL to the Graph View for a Dag."""
dag_id = attr.get("dag_id")
execution_date = attr.get("execution_date")
url = url_for("Airflow.graph", dag_id=dag_id, execution_date=execution_date)
return (
Markup('<a href="{}">{}</a>').format(url, dag_id) if dag_id else Markup("None")
) # noqa
|
def dag_link(attr):
"""Generates a URL to the Graph View for a Dag."""
dag_id = attr.get("dag_id")
execution_date = attr.get("execution_date")
url = url_for("Airflow.graph", dag_id=dag_id, execution_date=execution_date)
return Markup('<a href="{}">{}</a>').format(url, dag_id) # noqa
|
https://github.com/apache/airflow/issues/13602
|
Something bad has happened.
Please consider letting us know by creating a bug report using GitHub.
Python version: 3.6.12
Airflow version: 2.0.0
Node: 9097c882a712
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/airflow/.local/lib/python3.6/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/decorators.py", line 97, in view_func
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/session.py", line 65, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/views.py", line 2028, in graph
dag = current_app.dag_bag.get_dag(dag_id)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/session.py", line 65, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/models/dagbag.py", line 171, in get_dag
self._add_dag_from_db(dag_id=dag_id, session=session)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/models/dagbag.py", line 227, in _add_dag_from_db
raise SerializedDagNotFound(f"DAG '{dag_id}' not found in serialized_dag table")
airflow.exceptions.SerializedDagNotFound: DAG 'None' not found in serialized_dag table
|
airflow.exceptions.SerializedDagNotFound
|
def __init__(
self,
*,
bucket_name: str,
prefix: str,
aws_conn_id: str = "aws_default",
verify: Optional[Union[bool, str]] = None,
inactivity_period: float = 60 * 60,
min_objects: int = 1,
previous_objects: Optional[Set[str]] = None,
allow_delete: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.prefix = prefix
if inactivity_period < 0:
raise ValueError("inactivity_period must be non-negative")
self.inactivity_period = inactivity_period
self.min_objects = min_objects
self.previous_objects = previous_objects or set()
self.inactivity_seconds = 0
self.allow_delete = allow_delete
self.aws_conn_id = aws_conn_id
self.verify = verify
self.last_activity_time: Optional[datetime] = None
|
def __init__(
self,
*,
bucket_name: str,
prefix: str,
aws_conn_id: str = "aws_default",
verify: Optional[Union[bool, str]] = None,
inactivity_period: float = 60 * 60,
min_objects: int = 1,
previous_objects: Optional[Set[str]] = None,
allow_delete: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket_name
self.prefix = prefix
if inactivity_period < 0:
raise ValueError("inactivity_period must be non-negative")
self.inactivity_period = inactivity_period
self.min_objects = min_objects
self.previous_objects = previous_objects or set()
self.inactivity_seconds = 0
self.allow_delete = allow_delete
self.aws_conn_id = aws_conn_id
self.verify = verify
self.last_activity_time: Optional[datetime] = None
|
https://github.com/apache/airflow/issues/13481
|
Traceback (most recent call last):
File "/usr/local/lib/airflow/airflow/models/taskinstance.py", line 966, in _run_raw_task
self.render_templates(context=context)
File "/usr/local/lib/airflow/airflow/models/taskinstance.py", line 1428, in render_templates
self.task.render_template_fields(context)
File "/usr/local/lib/airflow/airflow/models/baseoperator.py", line 706, in render_template_fields
self._do_render_template_fields(self, self.template_fields, context, jinja_env, set())
File "/usr/local/lib/airflow/airflow/models/baseoperator.py", line 711, in _do_render_template_fields
content = getattr(parent, attr_name)
AttributeError: 'S3KeysUnchangedSensor' object has no attribute 'bucket_name'
|
AttributeError
|
def is_keys_unchanged(self, current_objects: Set[str]) -> bool:
"""
Checks whether new objects have been uploaded and the inactivity_period
has passed and updates the state of the sensor accordingly.
:param current_objects: set of object ids in bucket during last poke.
:type current_objects: set[str]
"""
current_num_objects = len(current_objects)
if current_objects > self.previous_objects:
# When new objects arrived, reset the inactivity_seconds
# and update previous_objects for the next poke.
self.log.info(
"New objects found at %s, resetting last_activity_time.",
os.path.join(self.bucket_name, self.prefix),
)
self.log.debug("New objects: %s", current_objects - self.previous_objects)
self.last_activity_time = datetime.now()
self.inactivity_seconds = 0
self.previous_objects = current_objects
return False
if self.previous_objects - current_objects:
# During the last poke interval objects were deleted.
if self.allow_delete:
deleted_objects = self.previous_objects - current_objects
self.previous_objects = current_objects
self.last_activity_time = datetime.now()
self.log.info(
"Objects were deleted during the last poke interval. Updating the "
"file counter and resetting last_activity_time:\n%s",
deleted_objects,
)
return False
raise AirflowException(
"Illegal behavior: objects were deleted in %s between pokes."
% os.path.join(self.bucket_name, self.prefix)
)
if self.last_activity_time:
self.inactivity_seconds = int(
(datetime.now() - self.last_activity_time).total_seconds()
)
else:
# Handles the first poke where last inactivity time is None.
self.last_activity_time = datetime.now()
self.inactivity_seconds = 0
if self.inactivity_seconds >= self.inactivity_period:
path = os.path.join(self.bucket_name, self.prefix)
if current_num_objects >= self.min_objects:
self.log.info(
"SUCCESS: \nSensor found %s objects at %s.\n"
"Waited at least %s seconds, with no new objects uploaded.",
current_num_objects,
path,
self.inactivity_period,
)
return True
self.log.error(
"FAILURE: Inactivity Period passed, not enough objects found in %s", path
)
return False
return False
|
def is_keys_unchanged(self, current_objects: Set[str]) -> bool:
"""
Checks whether new objects have been uploaded and the inactivity_period
has passed and updates the state of the sensor accordingly.
:param current_objects: set of object ids in bucket during last poke.
:type current_objects: set[str]
"""
current_num_objects = len(current_objects)
if current_objects > self.previous_objects:
# When new objects arrived, reset the inactivity_seconds
# and update previous_objects for the next poke.
self.log.info(
"New objects found at %s, resetting last_activity_time.",
os.path.join(self.bucket, self.prefix),
)
self.log.debug("New objects: %s", current_objects - self.previous_objects)
self.last_activity_time = datetime.now()
self.inactivity_seconds = 0
self.previous_objects = current_objects
return False
if self.previous_objects - current_objects:
# During the last poke interval objects were deleted.
if self.allow_delete:
deleted_objects = self.previous_objects - current_objects
self.previous_objects = current_objects
self.last_activity_time = datetime.now()
self.log.info(
"Objects were deleted during the last poke interval. Updating the "
"file counter and resetting last_activity_time:\n%s",
deleted_objects,
)
return False
raise AirflowException(
"Illegal behavior: objects were deleted in %s between pokes."
% os.path.join(self.bucket, self.prefix)
)
if self.last_activity_time:
self.inactivity_seconds = int(
(datetime.now() - self.last_activity_time).total_seconds()
)
else:
# Handles the first poke where last inactivity time is None.
self.last_activity_time = datetime.now()
self.inactivity_seconds = 0
if self.inactivity_seconds >= self.inactivity_period:
path = os.path.join(self.bucket, self.prefix)
if current_num_objects >= self.min_objects:
self.log.info(
"SUCCESS: \nSensor found %s objects at %s.\n"
"Waited at least %s seconds, with no new objects uploaded.",
current_num_objects,
path,
self.inactivity_period,
)
return True
self.log.error(
"FAILURE: Inactivity Period passed, not enough objects found in %s", path
)
return False
return False
|
https://github.com/apache/airflow/issues/13481
|
Traceback (most recent call last):
File "/usr/local/lib/airflow/airflow/models/taskinstance.py", line 966, in _run_raw_task
self.render_templates(context=context)
File "/usr/local/lib/airflow/airflow/models/taskinstance.py", line 1428, in render_templates
self.task.render_template_fields(context)
File "/usr/local/lib/airflow/airflow/models/baseoperator.py", line 706, in render_template_fields
self._do_render_template_fields(self, self.template_fields, context, jinja_env, set())
File "/usr/local/lib/airflow/airflow/models/baseoperator.py", line 711, in _do_render_template_fields
content = getattr(parent, attr_name)
AttributeError: 'S3KeysUnchangedSensor' object has no attribute 'bucket_name'
|
AttributeError
|
def poke(self, context):
return self.is_keys_unchanged(
set(self.hook.list_keys(self.bucket_name, prefix=self.prefix))
)
|
def poke(self, context):
return self.is_keys_unchanged(
set(self.hook.list_keys(self.bucket, prefix=self.prefix))
)
|
https://github.com/apache/airflow/issues/13481
|
Traceback (most recent call last):
File "/usr/local/lib/airflow/airflow/models/taskinstance.py", line 966, in _run_raw_task
self.render_templates(context=context)
File "/usr/local/lib/airflow/airflow/models/taskinstance.py", line 1428, in render_templates
self.task.render_template_fields(context)
File "/usr/local/lib/airflow/airflow/models/baseoperator.py", line 706, in render_template_fields
self._do_render_template_fields(self, self.template_fields, context, jinja_env, set())
File "/usr/local/lib/airflow/airflow/models/baseoperator.py", line 711, in _do_render_template_fields
content = getattr(parent, attr_name)
AttributeError: 'S3KeysUnchangedSensor' object has no attribute 'bucket_name'
|
AttributeError
|
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
if user.is_anonymous:
public_role = current_app.appbuilder.get_app.config["AUTH_ROLE_PUBLIC"]
return [current_app.appbuilder.sm.find_role(public_role)] if public_role else []
return user.roles
|
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
if user.is_anonymous:
public_role = current_app.appbuilder.config.get("AUTH_ROLE_PUBLIC")
return (
[current_app.appbuilder.security_manager.find_role(public_role)]
if public_role
else []
)
return user.roles
|
https://github.com/apache/airflow/issues/8815
|
-------------------------------------------------------------------------------
Node: 11af45a15833
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/airflow/.local/lib/python3.6/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/views.py", line 551, in list
widgets = self._list()
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/baseviews.py", line 1127, in _list
page_size=page_size,
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/baseviews.py", line 1026, in _get_list_widget
page_size=page_size,
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/interface.py", line 168, in query
query_count = self._get_base_query(query=query_count, filters=filters)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/interface.py", line 85, in _get_base_query
query = filters.apply_all(query)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/filters.py", line 269, in apply_all
query = flt.apply(query, value)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www_rbac/views.py", line 2187, in apply
if appbuilder.sm.has_all_dags_access():
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www_rbac/security.py", line 320, in has_all_dags_access
self._has_role(['Admin', 'Viewer', 'Op', 'User']) or
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www_rbac/security.py", line 299, in _has_role
[r.name in role_name_or_list for r in self.get_user_roles()])
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www_rbac/security.py", line 229, in get_user_roles
public_role = appbuilder.config.get('AUTH_ROLE_PUBLIC')
AttributeError: 'NoneType' object has no attribute 'config'
|
AttributeError
|
def read(self, filenames, encoding=None):
super().read(filenames=filenames, encoding=encoding)
|
def read(self, filenames, encoding=None):
super().read(filenames=filenames, encoding=encoding)
self._validate()
|
https://github.com/apache/airflow/issues/13254
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/airflow/__init__.py", line 34, in <module>
from airflow import settings
File "/usr/local/lib/python3.6/site-packages/airflow/settings.py", line 35, in <module>
from airflow.configuration import AIRFLOW_HOME, WEBSERVER_CONFIG, conf # NOQA F401
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 786, in <module>
conf.read(AIRFLOW_CONFIG)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 447, in read
self._validate()
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 196, in _validate
self._validate_config_dependencies()
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 224, in _validate_config_dependencies
is_sqlite = "sqlite" in self.get('core', 'sql_alchemy_conn')
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 324, in get
option = self._get_option_from_secrets(deprecated_key, deprecated_section, key, section)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 342, in _get_option_from_secrets
option = self._get_secret_option(section, key)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 303, in _get_secret_option
return _get_config_value_from_secret_backend(secrets_path)
NameError: name '_get_config_value_from_secret_backend' is not defined
|
NameError
|
def read_dict(self, dictionary, source="<dict>"):
super().read_dict(dictionary=dictionary, source=source)
|
def read_dict(self, dictionary, source="<dict>"):
super().read_dict(dictionary=dictionary, source=source)
self._validate()
|
https://github.com/apache/airflow/issues/13254
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/airflow/__init__.py", line 34, in <module>
from airflow import settings
File "/usr/local/lib/python3.6/site-packages/airflow/settings.py", line 35, in <module>
from airflow.configuration import AIRFLOW_HOME, WEBSERVER_CONFIG, conf # NOQA F401
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 786, in <module>
conf.read(AIRFLOW_CONFIG)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 447, in read
self._validate()
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 196, in _validate
self._validate_config_dependencies()
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 224, in _validate_config_dependencies
is_sqlite = "sqlite" in self.get('core', 'sql_alchemy_conn')
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 324, in get
option = self._get_option_from_secrets(deprecated_key, deprecated_section, key, section)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 342, in _get_option_from_secrets
option = self._get_secret_option(section, key)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 303, in _get_secret_option
return _get_config_value_from_secret_backend(secrets_path)
NameError: name '_get_config_value_from_secret_backend' is not defined
|
NameError
|
def upgrade(): # noqa: D103
# We previously had a KnownEvent's table, but we deleted the table without
# a down migration to remove it (so we didn't delete anyone's data if they
# were happing to use the feature.
#
# But before we can delete the users table we need to drop the FK
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
tables = inspector.get_table_names()
if "known_event" in tables:
for fkey in inspector.get_foreign_keys(
table_name="known_event", referred_table="users"
):
op.drop_constraint(fkey["name"], "known_event", type_="foreignkey")
if "chart" in tables:
op.drop_table(
"chart",
)
if "users" in tables:
op.drop_table("users")
|
def upgrade(): # noqa: D103
# We previously had a KnownEvent's table, but we deleted the table without
# a down migration to remove it (so we didn't delete anyone's data if they
# were happing to use the feature.
#
# But before we can delete the users table we need to drop the FK
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
tables = inspector.get_table_names()
if "known_event" in tables:
op.drop_constraint("known_event_user_id_fkey", "known_event")
if "chart" in tables:
op.drop_table(
"chart",
)
if "users" in tables:
op.drop_table("users")
|
https://github.com/apache/airflow/issues/13222
|
initdb_1 | DB: mysql://**:***@x.x.x.x:****/mydb
initdb_1 | [2020-12-21 15:32:09,044] {db.py:678} INFO - Creating tables
initdb_1 | INFO [alembic.runtime.migration] Context impl MySQLImpl.
initdb_1 | INFO [alembic.runtime.migration] Will assume non-transactional DDL.
initdb_1 | INFO [alembic.runtime.migration] Running upgrade 03afc6b6f902 -> cf5dc11e79ad, drop_user_and_chart
initdb_1 | Traceback (most recent call last):
initdb_1 | File "/usr/local/bin/airflow", line 8, in <module>
initdb_1 | sys.exit(main())
initdb_1 | File "/usr/local/lib/python3.7/site-packages/airflow/__main__.py", line 40, in main
initdb_1 | args.func(args)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 48, in command
initdb_1 | return func(*args, **kwargs)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/airflow/utils/cli.py", line 89, in wrapper
initdb_1 | return f(*args, **kwargs)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/airflow/cli/commands/db_command.py", line 48, in upgradedb
initdb_1 | db.upgradedb()
initdb_1 | File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 688, in upgradedb
initdb_1 | command.upgrade(config, 'heads')
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/command.py", line 298, in upgrade
initdb_1 | script.run_env()
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/script/base.py", line 489, in run_env
initdb_1 | util.load_python_file(self.dir, "env.py")
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/util/pyfiles.py", line 98, in load_python_file
initdb_1 | module = load_module_py(module_id, path)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/util/compat.py", line 184, in load_module_py
initdb_1 | spec.loader.exec_module(module)
initdb_1 | File "<frozen importlib._bootstrap_external>", line 728, in exec_module
initdb_1 | File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
initdb_1 | File "/usr/local/lib/python3.7/site-packages/airflow/migrations/env.py", line 108, in <module>
initdb_1 | run_migrations_online()
initdb_1 | File "/usr/local/lib/python3.7/site-packages/airflow/migrations/env.py", line 102, in run_migrations_online
initdb_1 | context.run_migrations()
initdb_1 | File "<string>", line 8, in run_migrations
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/runtime/environment.py", line 846, in run_migrations
initdb_1 | self.get_context().run_migrations(**kw)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/runtime/migration.py", line 522, in run_migrations
initdb_1 | step.migration_fn(**kw)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/airflow/migrations/versions/cf5dc11e79ad_drop_user_and_chart.py", line 49, in upgrade
initdb_1 | op.drop_constraint('known_event_user_id_fkey', 'known_event')
initdb_1 | File "<string>", line 8, in drop_constraint
initdb_1 | File "<string>", line 3, in drop_constraint
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/operations/ops.py", line 159, in drop_constraint
initdb_1 | return operations.invoke(op)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/operations/base.py", line 373, in invoke
initdb_1 | return fn(self, operation)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/operations/toimpl.py", line 163, in drop_constraint
initdb_1 | schema=operation.schema,
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/ddl/mysql.py", line 113, in drop_constraint
initdb_1 | super(MySQLImpl, self).drop_constraint(const)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/ddl/impl.py", line 248, in drop_constraint
initdb_1 | self._exec(schema.DropConstraint(const))
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/ddl/impl.py", line 141, in _exec
initdb_1 | return conn.execute(construct, *multiparams, **params)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1011, in execute
initdb_1 | return meth(self, multiparams, params)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
initdb_1 | return connection._execute_ddl(self, multiparams, params)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1066, in _execute_ddl
initdb_1 | else None,
initdb_1 | File "<string>", line 1, in <lambda>
initdb_1 | File "/usr/local/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 481, in compile
initdb_1 | return self._compiler(dialect, bind=bind, **kw)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 29, in _compiler
initdb_1 | return dialect.ddl_compiler(dialect, self, **kw)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 322, in __init__
initdb_1 | self.string = self.process(self.statement, **compile_kwargs)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 352, in process
initdb_1 | return obj._compiler_dispatch(self, **kwargs)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/sqlalchemy/ext/compiler.py", line 441, in <lambda>
initdb_1 | lambda *arg, **kw: existing(*arg, **kw),
initdb_1 | File "/usr/local/lib/python3.7/site-packages/sqlalchemy/ext/compiler.py", line 486, in __call__
initdb_1 | return fn(element, compiler, **kw)
initdb_1 | File "/usr/local/lib/python3.7/site-packages/alembic/ddl/mysql.py", line 394, in _mysql_drop_constraint
initdb_1 | "No generic 'DROP CONSTRAINT' in MySQL - "
initdb_1 | NotImplementedError: No generic 'DROP CONSTRAINT' in MySQL - please specify constraint type
|
NotImplementedError
|
def load_entrypoint_plugins(entry_points, airflow_plugins):
"""
Load AirflowPlugin subclasses from the entrypoints
provided. The entry_point group should be 'airflow.plugins'.
:param entry_points: A collection of entrypoints to search for plugins
:type entry_points: Generator[setuptools.EntryPoint, None, None]
:param airflow_plugins: A collection of existing airflow plugins to
ensure we don't load duplicates
:type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]]
:rtype: list[airflow.plugins_manager.AirflowPlugin]
"""
global import_errors # pylint: disable=global-statement
for entry_point, dist in entry_points:
log.debug("Importing entry_point plugin %s", entry_point.name)
try:
plugin_obj = entry_point.load()
plugin_obj.__usable_import_name = entry_point.module
if not is_valid_plugin(plugin_obj, airflow_plugins):
continue
if callable(getattr(plugin_obj, "on_load", None)):
plugin_obj.on_load()
airflow_plugins.append(plugin_obj)
except Exception as e: # pylint: disable=broad-except
log.exception("Failed to import plugin %s", entry_point.name)
import_errors[entry_point.module] = str(e)
return airflow_plugins
|
def load_entrypoint_plugins(entry_points, airflow_plugins):
"""
Load AirflowPlugin subclasses from the entrypoints
provided. The entry_point group should be 'airflow.plugins'.
:param entry_points: A collection of entrypoints to search for plugins
:type entry_points: Generator[setuptools.EntryPoint, None, None]
:param airflow_plugins: A collection of existing airflow plugins to
ensure we don't load duplicates
:type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]]
:rtype: list[airflow.plugins_manager.AirflowPlugin]
"""
global import_errors # pylint: disable=global-statement
for entry_point, dist in entry_points:
log.debug("Importing entry_point plugin %s", entry_point.name)
try:
plugin_obj = entry_point.load()
plugin_obj.__usable_import_name = entry_point.module
if not is_valid_plugin(plugin_obj, airflow_plugins):
continue
if callable(getattr(plugin_obj, "on_load", None)):
plugin_obj.on_load()
airflow_plugins.append(plugin_obj)
except Exception as e: # pylint: disable=broad-except
log.exception("Failed to import plugin %s", entry_point.name)
import_errors[entry_point.module_name] = str(e)
return airflow_plugins
|
https://github.com/apache/airflow/issues/12855
|
[2020-12-06 15:33:25,513] {plugins_manager.py:159} ERROR - Failed to import plugin AirflowPrometheus
worker_1 | Traceback (most recent call last):
worker_1 | File "/home/airflow/.local/lib/python3.8/site-packages/airflow/plugins_manager.py", line 150, in load_entrypoint_plugins
worker_1 | plugin_obj.__usable_import_name = entry_point.module
worker_1 | AttributeError: 'EntryPoint' object has no attribute 'module'
worker_1 | Traceback (most recent call last):
worker_1 | File "/home/airflow/.local/lib/python3.8/site-packages/airflow/plugins_manager.py", line 150, in load_entrypoint_plugins
worker_1 | plugin_obj.__usable_import_name = entry_point.module
worker_1 | AttributeError: 'EntryPoint' object has no attribute 'module'
|
AttributeError
|
def execute(self, context: Dict):
if isinstance(self.execution_date, datetime.datetime):
execution_date = self.execution_date
elif isinstance(self.execution_date, str):
execution_date = timezone.parse(self.execution_date)
self.execution_date = execution_date
else:
execution_date = timezone.utcnow()
run_id = DagRun.generate_run_id(DagRunType.MANUAL, execution_date)
try:
# Ignore MyPy type for self.execution_date
# because it doesn't pick up the timezone.parse() for strings
dag_run = trigger_dag(
dag_id=self.trigger_dag_id,
run_id=run_id,
conf=self.conf,
execution_date=self.execution_date,
replace_microseconds=False,
)
except DagRunAlreadyExists as e:
if self.reset_dag_run:
self.log.info("Clearing %s on %s", self.trigger_dag_id, self.execution_date)
# Get target dag object and call clear()
dag_model = DagModel.get_current(self.trigger_dag_id)
if dag_model is None:
raise DagNotFound(f"Dag id {self.trigger_dag_id} not found in DagModel")
dag_bag = DagBag(dag_folder=dag_model.fileloc, read_dags_from_db=True)
dag = dag_bag.get_dag(self.trigger_dag_id)
dag.clear(start_date=self.execution_date, end_date=self.execution_date)
dag_run = DagRun.find(dag_id=dag.dag_id, run_id=run_id)[0]
else:
raise e
if self.wait_for_completion:
# wait for dag to complete
while True:
self.log.info(
"Waiting for %s on %s to become allowed state %s ...",
self.trigger_dag_id,
dag_run.execution_date,
self.allowed_states,
)
time.sleep(self.poke_interval)
dag_run.refresh_from_db()
state = dag_run.state
if state in self.failed_states:
raise AirflowException(
f"{self.trigger_dag_id} failed with failed states {state}"
)
if state in self.allowed_states:
self.log.info(
"%s finished with allowed state %s", self.trigger_dag_id, state
)
return
|
def execute(self, context: Dict):
if isinstance(self.execution_date, datetime.datetime):
execution_date = self.execution_date
elif isinstance(self.execution_date, str):
execution_date = timezone.parse(self.execution_date)
self.execution_date = execution_date
else:
execution_date = timezone.utcnow()
run_id = DagRun.generate_run_id(DagRunType.MANUAL, execution_date)
try:
# Ignore MyPy type for self.execution_date
# because it doesn't pick up the timezone.parse() for strings
dag_run = trigger_dag(
dag_id=self.trigger_dag_id,
run_id=run_id,
conf=self.conf,
execution_date=self.execution_date,
replace_microseconds=False,
)
except DagRunAlreadyExists as e:
if self.reset_dag_run:
self.log.info("Clearing %s on %s", self.trigger_dag_id, self.execution_date)
# Get target dag object and call clear()
dag_model = DagModel.get_current(self.trigger_dag_id)
if dag_model is None:
raise DagNotFound(f"Dag id {self.trigger_dag_id} not found in DagModel")
dag_bag = DagBag(dag_folder=dag_model.fileloc, read_dags_from_db=True)
dag = dag_bag.get_dag(self.trigger_dag_id)
dag.clear(start_date=self.execution_date, end_date=self.execution_date)
else:
raise e
if self.wait_for_completion:
# wait for dag to complete
while True:
self.log.info(
"Waiting for %s on %s to become allowed state %s ...",
self.trigger_dag_id,
dag_run.execution_date,
self.allowed_states,
)
time.sleep(self.poke_interval)
dag_run.refresh_from_db()
state = dag_run.state
if state in self.failed_states:
raise AirflowException(
f"{self.trigger_dag_id} failed with failed states {state}"
)
if state in self.allowed_states:
self.log.info(
"%s finished with allowed state %s", self.trigger_dag_id, state
)
return
|
https://github.com/apache/airflow/issues/12587
|
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 984, in _run_raw_task
result = task_copy.execute(context=context)
File "/usr/local/airflow/plugins/custom_trigger_operator.py", line 138, in execute
dag_run.execution_date,
UnboundLocalError: local variable 'dag_run' referenced before assignment
|
UnboundLocalError
|
def _convert_to_airflow_pod(pod):
"""
Converts a k8s V1Pod object into an `airflow.kubernetes.pod.Pod` object.
This function is purely for backwards compatibility
"""
base_container = pod.spec.containers[0] # type: k8s.V1Container
env_vars, secrets = _extract_env_vars_and_secrets(base_container.env)
volumes = _extract_volumes(pod.spec.volumes)
api_client = ApiClient()
init_containers = pod.spec.init_containers
image_pull_secrets = pod.spec.image_pull_secrets or []
if pod.spec.init_containers is not None:
init_containers = [
api_client.sanitize_for_serialization(i) for i in pod.spec.init_containers
]
dummy_pod = Pod(
image=base_container.image,
envs=env_vars,
cmds=base_container.command,
args=base_container.args,
labels=pod.metadata.labels,
annotations=pod.metadata.annotations,
node_selectors=pod.spec.node_selector,
name=pod.metadata.name,
ports=_extract_ports(base_container.ports),
volumes=volumes,
volume_mounts=_extract_volume_mounts(base_container.volume_mounts),
namespace=pod.metadata.namespace,
image_pull_policy=base_container.image_pull_policy or "IfNotPresent",
tolerations=pod.spec.tolerations,
init_containers=init_containers,
image_pull_secrets=",".join([i.name for i in image_pull_secrets]),
resources=base_container.resources,
service_account_name=pod.spec.service_account_name,
secrets=secrets,
affinity=api_client.sanitize_for_serialization(pod.spec.affinity),
hostnetwork=pod.spec.host_network,
security_context=_extract_security_context(pod.spec.security_context),
)
return dummy_pod
|
def _convert_to_airflow_pod(pod):
"""
Converts a k8s V1Pod object into an `airflow.kubernetes.pod.Pod` object.
This function is purely for backwards compatibility
"""
base_container = pod.spec.containers[0] # type: k8s.V1Container
env_vars, secrets = _extract_env_vars_and_secrets(base_container.env)
volumes = _extract_volumes(pod.spec.volumes)
api_client = ApiClient()
init_containers = pod.spec.init_containers
image_pull_secrets = pod.spec.image_pull_secrets or []
if pod.spec.init_containers is not None:
init_containers = [
api_client.sanitize_for_serialization(i) for i in pod.spec.init_containers
]
dummy_pod = Pod(
image=base_container.image,
envs=env_vars,
cmds=base_container.command,
args=base_container.args,
labels=pod.metadata.labels,
annotations=pod.metadata.annotations,
node_selectors=pod.spec.node_selector,
name=pod.metadata.name,
ports=_extract_ports(base_container.ports),
volumes=volumes,
volume_mounts=_extract_volume_mounts(base_container.volume_mounts),
namespace=pod.metadata.namespace,
image_pull_policy=base_container.image_pull_policy or "IfNotPresent",
tolerations=pod.spec.tolerations,
init_containers=init_containers,
image_pull_secrets=",".join([i.name for i in image_pull_secrets]),
resources=base_container.resources,
service_account_name=pod.spec.service_account_name,
secrets=secrets,
affinity=pod.spec.affinity,
hostnetwork=pod.spec.host_network,
security_context=_extract_security_context(pod.spec.security_context),
)
return dummy_pod
|
https://github.com/apache/airflow/issues/11731
|
[2020-10-21 20:55:11,879] {scheduler_job.py:1401} ERROR - Exception when executing execute_helper
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/airflow/kubernetes/pod_launcher.py", line 114, in _mutate_pod_backcompat
settings.pod_mutation_hook(dummy_pod)
File "/usr/local/airflow/config/airflow_local_settings.py", line 21, in pod_mutation_hook
pod.affinity.update({})
AttributeError: 'V1Affinity' object has no attribute 'update'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/airflow/kubernetes/pod_launcher.py", line 124, in _mutate_pod_backcompat
settings.pod_mutation_hook(pod)
File "/usr/local/airflow/config/airflow_local_settings.py", line 13, in pod_mutation_hook
if 'airflow-worker' in pod.labels.keys() or \
AttributeError: 'V1Pod' object has no attribute 'labels'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1399, in _execute
self._execute_helper()
File "/usr/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1470, in _execute_helper
if not self._validate_and_run_task_instances(simple_dag_bag=simple_dag_bag):
File "/usr/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1532, in _validate_and_run_task_instances
self.executor.heartbeat()
File "/usr/lib/python3.7/site-packages/airflow/executors/base_executor.py", line 134, in heartbeat
self.sync()
File "/usr/lib/python3.7/site-packages/airflow/executors/kubernetes_executor.py", line 870, in sync
self.kube_scheduler.run_next(task)
File "/usr/lib/python3.7/site-packages/airflow/executors/kubernetes_executor.py", line 497, in run_next
self.launcher.run_pod_async(pod, **self.kube_config.kube_client_request_args)
File "/usr/lib/python3.7/site-packages/airflow/kubernetes/pod_launcher.py", line 93, in run_pod_async
pod = self._mutate_pod_backcompat(pod)
File "/usr/lib/python3.7/site-packages/airflow/kubernetes/pod_launcher.py", line 127, in _mutate_pod_backcompat
raise Exception([e, e2])
Exception: [AttributeError("'V1Affinity' object has no attribute 'update'"), AttributeError("'V1Pod' object has no attribute 'labels'")]
|
AttributeError
|
def init_on_load(self):
"""
Called by the ORM after the instance has been loaded from the DB or otherwise reconstituted
i.e automatically deserialize Xcom value when loading from DB.
"""
try:
self.value = self.orm_deserialize_value()
except (UnicodeEncodeError, ValueError):
# For backward-compatibility.
# Preventing errors in webserver
# due to XComs mixed with pickled and unpickled.
self.value = pickle.loads(self.value)
|
def init_on_load(self):
"""
Called by the ORM after the instance has been loaded from the DB or otherwise reconstituted
i.e automatically deserialize Xcom value when loading from DB.
"""
try:
self.value = XCom.deserialize_value(self)
except (UnicodeEncodeError, ValueError):
# For backward-compatibility.
# Preventing errors in webserver
# due to XComs mixed with pickled and unpickled.
self.value = pickle.loads(self.value)
|
https://github.com/apache/airflow/issues/12315
|
Python version: 3.8.6
Airflow version: 2.0.0b2
Node: 950a17127708
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/views.py", line 552, in list
return self.render_template(
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/baseviews.py", line 280, in render_template
return render_template(
File "/usr/local/lib/python3.8/site-packages/flask/templating.py", line 137, in render_template
return _render(
File "/usr/local/lib/python3.8/site-packages/flask/templating.py", line 120, in _render
rv = template.render(context)
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 1090, in render
self.environment.handle_exception()
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 832, in handle_exception
reraise(*rewrite_traceback_stack(source=source))
File "/usr/local/lib/python3.8/site-packages/jinja2/_compat.py", line 28, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 2, in top-level template code
{% import 'appbuilder/general/lib.html' as lib %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/base.html", line 1, in top-level template code
{% extends base_template %}
File "/opt/airflow/airflow/www/templates/airflow/master.html", line 20, in top-level template code
{% extends 'appbuilder/baselayout.html' %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/baselayout.html", line 2, in top-level template code
{% import 'appbuilder/baselib.html' as baselib %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/init.html", line 46, in top-level template code
{% block body %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/baselayout.html", line 19, in block "body"
{% block content %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 13, in block "content"
{% block list_list scoped %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 15, in block "list_list"
{{ widgets.get('list')()|safe }}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/widgets.py", line 37, in __call__
return template.render(args)
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 1090, in render
self.environment.handle_exception()
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 832, in handle_exception
reraise(*rewrite_traceback_stack(source=source))
File "/usr/local/lib/python3.8/site-packages/jinja2/_compat.py", line 28, in reraise
raise value.with_traceback(tb)
File "/opt/airflow/airflow/www/templates/airflow/model_list.html", line 21, in top-level template code
{% extends 'appbuilder/general/widgets/base_list.html' %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/widgets/base_list.html", line 23, in top-level template code
{% block begin_loop_values %}
File "/opt/airflow/airflow/www/templates/airflow/model_list.html", line 80, in block "begin_loop_values"
{% elif item[value] != None %}
File "/usr/local/lib/python3.8/site-packages/pandas/core/generic.py", line 1326, in __nonzero__
raise ValueError(
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
|
ValueError
|
def serialize_value(value: Any):
"""Serialize Xcom value to str or pickled object"""
if conf.getboolean("core", "enable_xcom_pickling"):
return pickle.dumps(value)
try:
return json.dumps(value).encode("UTF-8")
except (ValueError, TypeError):
log.error(
"Could not serialize the XCom value into JSON. "
"If you are using pickles instead of JSON "
"for XCom, then you need to enable pickle "
"support for XCom in your airflow config."
)
raise
|
def serialize_value(value: Any):
"""Serialize Xcom value to str or pickled object"""
if conf.getboolean("core", "enable_xcom_pickling"):
return pickle.dumps(value)
try:
return json.dumps(value).encode("UTF-8")
except (ValueError, TypeError):
log.error(
"Could not serialize the XCOM value into JSON. "
"If you are using pickles instead of JSON "
"for XCOM, then you need to enable pickle "
"support for XCOM in your airflow config."
)
raise
|
https://github.com/apache/airflow/issues/12315
|
Python version: 3.8.6
Airflow version: 2.0.0b2
Node: 950a17127708
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/views.py", line 552, in list
return self.render_template(
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/baseviews.py", line 280, in render_template
return render_template(
File "/usr/local/lib/python3.8/site-packages/flask/templating.py", line 137, in render_template
return _render(
File "/usr/local/lib/python3.8/site-packages/flask/templating.py", line 120, in _render
rv = template.render(context)
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 1090, in render
self.environment.handle_exception()
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 832, in handle_exception
reraise(*rewrite_traceback_stack(source=source))
File "/usr/local/lib/python3.8/site-packages/jinja2/_compat.py", line 28, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 2, in top-level template code
{% import 'appbuilder/general/lib.html' as lib %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/base.html", line 1, in top-level template code
{% extends base_template %}
File "/opt/airflow/airflow/www/templates/airflow/master.html", line 20, in top-level template code
{% extends 'appbuilder/baselayout.html' %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/baselayout.html", line 2, in top-level template code
{% import 'appbuilder/baselib.html' as baselib %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/init.html", line 46, in top-level template code
{% block body %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/baselayout.html", line 19, in block "body"
{% block content %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 13, in block "content"
{% block list_list scoped %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 15, in block "list_list"
{{ widgets.get('list')()|safe }}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/widgets.py", line 37, in __call__
return template.render(args)
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 1090, in render
self.environment.handle_exception()
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 832, in handle_exception
reraise(*rewrite_traceback_stack(source=source))
File "/usr/local/lib/python3.8/site-packages/jinja2/_compat.py", line 28, in reraise
raise value.with_traceback(tb)
File "/opt/airflow/airflow/www/templates/airflow/model_list.html", line 21, in top-level template code
{% extends 'appbuilder/general/widgets/base_list.html' %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/widgets/base_list.html", line 23, in top-level template code
{% block begin_loop_values %}
File "/opt/airflow/airflow/www/templates/airflow/model_list.html", line 80, in block "begin_loop_values"
{% elif item[value] != None %}
File "/usr/local/lib/python3.8/site-packages/pandas/core/generic.py", line 1326, in __nonzero__
raise ValueError(
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
|
ValueError
|
def deserialize_value(result: "XCom") -> Any:
"""Deserialize XCom value from str or pickle object"""
enable_pickling = conf.getboolean("core", "enable_xcom_pickling")
if enable_pickling:
return pickle.loads(result.value)
try:
return json.loads(result.value.decode("UTF-8"))
except JSONDecodeError:
log.error(
"Could not deserialize the XCom value from JSON. "
"If you are using pickles instead of JSON "
"for XCom, then you need to enable pickle "
"support for XCom in your airflow config."
)
raise
|
def deserialize_value(result) -> Any:
"""Deserialize Xcom value from str or pickle object"""
enable_pickling = conf.getboolean("core", "enable_xcom_pickling")
if enable_pickling:
return pickle.loads(result.value)
try:
return json.loads(result.value.decode("UTF-8"))
except JSONDecodeError:
log.error(
"Could not deserialize the XCOM value from JSON. "
"If you are using pickles instead of JSON "
"for XCOM, then you need to enable pickle "
"support for XCOM in your airflow config."
)
raise
|
https://github.com/apache/airflow/issues/12315
|
Python version: 3.8.6
Airflow version: 2.0.0b2
Node: 950a17127708
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/views.py", line 552, in list
return self.render_template(
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/baseviews.py", line 280, in render_template
return render_template(
File "/usr/local/lib/python3.8/site-packages/flask/templating.py", line 137, in render_template
return _render(
File "/usr/local/lib/python3.8/site-packages/flask/templating.py", line 120, in _render
rv = template.render(context)
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 1090, in render
self.environment.handle_exception()
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 832, in handle_exception
reraise(*rewrite_traceback_stack(source=source))
File "/usr/local/lib/python3.8/site-packages/jinja2/_compat.py", line 28, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 2, in top-level template code
{% import 'appbuilder/general/lib.html' as lib %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/base.html", line 1, in top-level template code
{% extends base_template %}
File "/opt/airflow/airflow/www/templates/airflow/master.html", line 20, in top-level template code
{% extends 'appbuilder/baselayout.html' %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/baselayout.html", line 2, in top-level template code
{% import 'appbuilder/baselib.html' as baselib %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/init.html", line 46, in top-level template code
{% block body %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/baselayout.html", line 19, in block "body"
{% block content %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 13, in block "content"
{% block list_list scoped %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 15, in block "list_list"
{{ widgets.get('list')()|safe }}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/widgets.py", line 37, in __call__
return template.render(args)
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 1090, in render
self.environment.handle_exception()
File "/usr/local/lib/python3.8/site-packages/jinja2/environment.py", line 832, in handle_exception
reraise(*rewrite_traceback_stack(source=source))
File "/usr/local/lib/python3.8/site-packages/jinja2/_compat.py", line 28, in reraise
raise value.with_traceback(tb)
File "/opt/airflow/airflow/www/templates/airflow/model_list.html", line 21, in top-level template code
{% extends 'appbuilder/general/widgets/base_list.html' %}
File "/usr/local/lib/python3.8/site-packages/flask_appbuilder/templates/appbuilder/general/widgets/base_list.html", line 23, in top-level template code
{% block begin_loop_values %}
File "/opt/airflow/airflow/www/templates/airflow/model_list.html", line 80, in block "begin_loop_values"
{% elif item[value] != None %}
File "/usr/local/lib/python3.8/site-packages/pandas/core/generic.py", line 1326, in __nonzero__
raise ValueError(
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
|
ValueError
|
def __init__(
self,
*,
python_callable: Callable,
task_id: str,
op_args: Tuple[Any],
op_kwargs: Dict[str, Any],
multiple_outputs: bool = False,
**kwargs,
) -> None:
kwargs["task_id"] = self._get_unique_task_id(
task_id, kwargs.get("dag"), kwargs.get("task_group")
)
super().__init__(**kwargs)
self.python_callable = python_callable
# Check that arguments can be binded
signature(python_callable).bind(*op_args, **op_kwargs)
self.multiple_outputs = multiple_outputs
self.op_args = op_args
self.op_kwargs = op_kwargs
|
def __init__(
self,
*,
python_callable: Callable,
task_id: str,
op_args: Tuple[Any],
op_kwargs: Dict[str, Any],
multiple_outputs: bool = False,
**kwargs,
) -> None:
kwargs["task_id"] = self._get_unique_task_id(task_id, kwargs.get("dag"))
super().__init__(**kwargs)
self.python_callable = python_callable
# Check that arguments can be binded
signature(python_callable).bind(*op_args, **op_kwargs)
self.multiple_outputs = multiple_outputs
self.op_args = op_args
self.op_kwargs = op_kwargs
|
https://github.com/apache/airflow/issues/12309
|
Broken DAG: [/files/dags/test.py] Traceback (most recent call last):
File "/opt/airflow/airflow/models/baseoperator.py", line 410, in __init__
task_group.add(self)
File "/opt/airflow/airflow/utils/task_group.py", line 140, in add
raise DuplicateTaskIdFound(f"Task id '{key}' has already been added to the DAG")
airflow.exceptions.DuplicateTaskIdFound: Task id 'show_tasks.show' has already been added to the DAG
|
airflow.exceptions.DuplicateTaskIdFound
|
def _get_unique_task_id(
task_id: str, dag: Optional[DAG] = None, task_group: Optional[TaskGroup] = None
) -> str:
"""
Generate unique task id given a DAG (or if run in a DAG context)
Ids are generated by appending a unique number to the end of
the original task id.
Example:
task_id
task_id__1
task_id__2
...
task_id__20
"""
dag = dag or DagContext.get_current_dag()
if not dag:
return task_id
# We need to check if we are in the context of TaskGroup as the task_id may
# already be altered
task_group = task_group or TaskGroupContext.get_current_task_group(dag)
tg_task_id = task_group.child_id(task_id) if task_group else task_id
if tg_task_id not in dag.task_ids:
return task_id
core = re.split(r"__\d+$", task_id)[0]
suffixes = sorted(
[
int(re.split(r"^.+__", task_id)[1])
for task_id in dag.task_ids
if re.match(rf"^{core}__\d+$", task_id)
]
)
if not suffixes:
return f"{core}__1"
return f"{core}__{suffixes[-1] + 1}"
|
def _get_unique_task_id(task_id: str, dag: Optional[DAG] = None) -> str:
"""
Generate unique task id given a DAG (or if run in a DAG context)
Ids are generated by appending a unique number to the end of
the original task id.
Example:
task_id
task_id__1
task_id__2
...
task_id__20
"""
dag = dag or DagContext.get_current_dag()
if not dag or task_id not in dag.task_ids:
return task_id
core = re.split(r"__\d+$", task_id)[0]
suffixes = sorted(
[
int(re.split(r"^.+__", task_id)[1])
for task_id in dag.task_ids
if re.match(rf"^{core}__\d+$", task_id)
]
)
if not suffixes:
return f"{core}__1"
return f"{core}__{suffixes[-1] + 1}"
|
https://github.com/apache/airflow/issues/12309
|
Broken DAG: [/files/dags/test.py] Traceback (most recent call last):
File "/opt/airflow/airflow/models/baseoperator.py", line 410, in __init__
task_group.add(self)
File "/opt/airflow/airflow/utils/task_group.py", line 140, in add
raise DuplicateTaskIdFound(f"Task id '{key}' has already been added to the DAG")
airflow.exceptions.DuplicateTaskIdFound: Task id 'show_tasks.show' has already been added to the DAG
|
airflow.exceptions.DuplicateTaskIdFound
|
def task_run(args, dag=None):
"""Runs a single task instance"""
# Load custom airflow config
if args.cfg_path:
with open(args.cfg_path, "r") as conf_file:
conf_dict = json.load(conf_file)
if os.path.exists(args.cfg_path):
os.remove(args.cfg_path)
conf.read_dict(conf_dict, source=args.cfg_path)
settings.configure_vars()
# IMPORTANT, have to use the NullPool, otherwise, each "run" command may leave
# behind multiple open sleeping connections while heartbeating, which could
# easily exceed the database connection limit when
# processing hundreds of simultaneous tasks.
settings.configure_orm(disable_connection_pool=True)
if dag and args.pickle:
raise AirflowException(
"You cannot use the --pickle option when using DAG.cli() method."
)
elif args.pickle:
print(f"Loading pickle id: {args.pickle}")
dag = get_dag_by_pickle(args.pickle)
elif not dag:
dag = get_dag(args.subdir, args.dag_id)
else:
# Use DAG from parameter
pass
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
ti.refresh_from_db()
ti.init_run_context(raw=args.raw)
hostname = get_hostname()
print(f"Running {ti} on host {hostname}")
if args.interactive:
_run_task_by_selected_method(args, dag, ti)
else:
if settings.DONOT_MODIFY_HANDLERS:
with (
redirect_stdout(StreamLogWriter(ti.log, logging.INFO)),
redirect_stderr(StreamLogWriter(ti.log, logging.WARN)),
):
_run_task_by_selected_method(args, dag, ti)
else:
# Get all the Handlers from 'airflow.task' logger
# Add these handlers to the root logger so that we can get logs from
# any custom loggers defined in the DAG
airflow_logger_handlers = logging.getLogger("airflow.task").handlers
root_logger = logging.getLogger()
root_logger_handlers = root_logger.handlers
# Remove all handlers from Root Logger to avoid duplicate logs
for handler in root_logger_handlers:
root_logger.removeHandler(handler)
for handler in airflow_logger_handlers:
root_logger.addHandler(handler)
root_logger.setLevel(logging.getLogger("airflow.task").level)
with (
redirect_stdout(StreamLogWriter(ti.log, logging.INFO)),
redirect_stderr(StreamLogWriter(ti.log, logging.WARN)),
):
_run_task_by_selected_method(args, dag, ti)
# We need to restore the handlers to the loggers as celery worker process
# can call this command multiple times,
# so if we don't reset this then logs from next task would go to the wrong place
for handler in airflow_logger_handlers:
root_logger.removeHandler(handler)
for handler in root_logger_handlers:
root_logger.addHandler(handler)
logging.shutdown()
|
def task_run(args, dag=None):
"""Runs a single task instance"""
# Load custom airflow config
if args.cfg_path:
with open(args.cfg_path, "r") as conf_file:
conf_dict = json.load(conf_file)
if os.path.exists(args.cfg_path):
os.remove(args.cfg_path)
conf.read_dict(conf_dict, source=args.cfg_path)
settings.configure_vars()
# IMPORTANT, have to use the NullPool, otherwise, each "run" command may leave
# behind multiple open sleeping connections while heartbeating, which could
# easily exceed the database connection limit when
# processing hundreds of simultaneous tasks.
settings.configure_orm(disable_connection_pool=True)
if dag and args.pickle:
raise AirflowException(
"You cannot use the --pickle option when using DAG.cli() method."
)
elif args.pickle:
print(f"Loading pickle id: {args.pickle}")
dag = get_dag_by_pickle(args.pickle)
elif not dag:
dag = get_dag(args.subdir, args.dag_id)
else:
# Use DAG from parameter
pass
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
ti.init_run_context(raw=args.raw)
hostname = get_hostname()
print(f"Running {ti} on host {hostname}")
if args.interactive:
_run_task_by_selected_method(args, dag, ti)
else:
if settings.DONOT_MODIFY_HANDLERS:
with (
redirect_stdout(StreamLogWriter(ti.log, logging.INFO)),
redirect_stderr(StreamLogWriter(ti.log, logging.WARN)),
):
_run_task_by_selected_method(args, dag, ti)
else:
# Get all the Handlers from 'airflow.task' logger
# Add these handlers to the root logger so that we can get logs from
# any custom loggers defined in the DAG
airflow_logger_handlers = logging.getLogger("airflow.task").handlers
root_logger = logging.getLogger()
root_logger_handlers = root_logger.handlers
# Remove all handlers from Root Logger to avoid duplicate logs
for handler in root_logger_handlers:
root_logger.removeHandler(handler)
for handler in airflow_logger_handlers:
root_logger.addHandler(handler)
root_logger.setLevel(logging.getLogger("airflow.task").level)
with (
redirect_stdout(StreamLogWriter(ti.log, logging.INFO)),
redirect_stderr(StreamLogWriter(ti.log, logging.WARN)),
):
_run_task_by_selected_method(args, dag, ti)
# We need to restore the handlers to the loggers as celery worker process
# can call this command multiple times,
# so if we don't reset this then logs from next task would go to the wrong place
for handler in airflow_logger_handlers:
root_logger.removeHandler(handler)
for handler in root_logger_handlers:
root_logger.addHandler(handler)
logging.shutdown()
|
https://github.com/apache/airflow/issues/11717
|
[...]
--------------------------------------------------------------------------------
[2020-10-21 13:29:07,958] {taskinstance.py:1020} INFO - Starting attempt 1 of 2
[2020-10-21 13:29:07,959] {taskinstance.py:1021} INFO -
--------------------------------------------------------------------------------
[...]
[2020-10-21 13:29:08,163] {logging_mixin.py:110} INFO - Running demo_task, try_number = 1
[2020-10-21 13:29:08,164] {taskinstance.py:1348} ERROR - Shan't
Traceback (most recent call last):
[...]
ValueError: Shan't
[2020-10-21 13:29:08,168] {taskinstance.py:1392} INFO - Marking task as UP_FOR_RETRY. dag_id=trynumber_demo, task_id=demo_task, execution_date=20201021T122907, start_date=20201021T122907, end_date=20201021T122908
[...]
[2020-10-21 13:29:09,121] {taskinstance.py:1019} INFO -
--------------------------------------------------------------------------------
[2020-10-21 13:29:09,121] {taskinstance.py:1020} INFO - Starting attempt 2 of 2
[2020-10-21 13:29:09,121] {taskinstance.py:1021} INFO -
--------------------------------------------------------------------------------
[...]
[2020-10-21 13:29:09,333] {logging_mixin.py:110} INFO - Running demo_task, try_number = 2
[2020-10-21 13:29:09,334] {python.py:141} INFO - Done. Returned value was: None
[2020-10-21 13:29:09,355] {taskinstance.py:1143} INFO - Marking task as SUCCESS.dag_id=trynumber_demo, task_id=demo_task, execution_date=20201021T122907, start_date=20201021T122909, end_date=20201021T122909
[2020-10-21 13:29:09,404] {local_task_job.py:117} INFO - Task exited with return code 0
|
ValueError
|
def _serialize(
cls, var: Any
) -> Any: # Unfortunately there is no support for recursive types in mypy
"""Helper function of depth first search for serialization.
The serialization protocol is:
(1) keeping JSON supported types: primitives, dict, list;
(2) encoding other types as ``{TYPE: 'foo', VAR: 'bar'}``, the deserialization
step decode VAR according to TYPE;
(3) Operator has a special field CLASS to record the original class
name for displaying in UI.
"""
try:
if cls._is_primitive(var):
# enum.IntEnum is an int instance, it causes json dumps error so we use its value.
if isinstance(var, enum.Enum):
return var.value
return var
elif isinstance(var, dict):
return cls._encode(
{str(k): cls._serialize(v) for k, v in var.items()}, type_=DAT.DICT
)
elif HAS_KUBERNETES and isinstance(var, k8s.V1Pod):
json_pod = PodGenerator.serialize_pod(var)
return cls._encode(json_pod, type_=DAT.POD)
elif isinstance(var, list):
return [cls._serialize(v) for v in var]
elif HAS_KUBERNETES and isinstance(var, k8s.V1Pod):
json_pod = PodGenerator.serialize_pod(var)
return cls._encode(json_pod, type_=DAT.POD)
elif isinstance(var, DAG):
return SerializedDAG.serialize_dag(var)
elif isinstance(var, BaseOperator):
return SerializedBaseOperator.serialize_operator(var)
elif isinstance(var, cls._datetime_types):
return cls._encode(var.timestamp(), type_=DAT.DATETIME)
elif isinstance(var, datetime.timedelta):
return cls._encode(var.total_seconds(), type_=DAT.TIMEDELTA)
elif isinstance(var, Timezone):
return cls._encode(str(var.name), type_=DAT.TIMEZONE)
elif isinstance(var, relativedelta.relativedelta):
encoded = {
k: v for k, v in var.__dict__.items() if not k.startswith("_") and v
}
if var.weekday and var.weekday.n:
# Every n'th Friday for example
encoded["weekday"] = [var.weekday.weekday, var.weekday.n]
elif var.weekday:
encoded["weekday"] = [var.weekday.weekday]
return cls._encode(encoded, type_=DAT.RELATIVEDELTA)
elif callable(var):
return str(get_python_source(var))
elif isinstance(var, set):
# FIXME: casts set to list in customized serialization in future.
return cls._encode([cls._serialize(v) for v in var], type_=DAT.SET)
elif isinstance(var, tuple):
# FIXME: casts tuple to list in customized serialization in future.
return cls._encode([cls._serialize(v) for v in var], type_=DAT.TUPLE)
elif isinstance(var, TaskGroup):
return SerializedTaskGroup.serialize_task_group(var)
else:
log.debug("Cast type %s to str in serialization.", type(var))
return str(var)
except Exception: # pylint: disable=broad-except
log.error("Failed to stringify.", exc_info=True)
return FAILED
|
def _serialize(
cls, var: Any
) -> Any: # Unfortunately there is no support for recursive types in mypy
"""Helper function of depth first search for serialization.
The serialization protocol is:
(1) keeping JSON supported types: primitives, dict, list;
(2) encoding other types as ``{TYPE: 'foo', VAR: 'bar'}``, the deserialization
step decode VAR according to TYPE;
(3) Operator has a special field CLASS to record the original class
name for displaying in UI.
"""
try:
if cls._is_primitive(var):
# enum.IntEnum is an int instance, it causes json dumps error so we use its value.
if isinstance(var, enum.Enum):
return var.value
return var
elif isinstance(var, dict):
return cls._encode(
{str(k): cls._serialize(v) for k, v in var.items()}, type_=DAT.DICT
)
elif isinstance(var, k8s.V1Pod):
json_pod = PodGenerator.serialize_pod(var)
return cls._encode(json_pod, type_=DAT.POD)
elif isinstance(var, list):
return [cls._serialize(v) for v in var]
elif isinstance(var, k8s.V1Pod):
json_pod = PodGenerator.serialize_pod(var)
return cls._encode(json_pod, type_=DAT.POD)
elif isinstance(var, DAG):
return SerializedDAG.serialize_dag(var)
elif isinstance(var, BaseOperator):
return SerializedBaseOperator.serialize_operator(var)
elif isinstance(var, cls._datetime_types):
return cls._encode(var.timestamp(), type_=DAT.DATETIME)
elif isinstance(var, datetime.timedelta):
return cls._encode(var.total_seconds(), type_=DAT.TIMEDELTA)
elif isinstance(var, Timezone):
return cls._encode(str(var.name), type_=DAT.TIMEZONE)
elif isinstance(var, relativedelta.relativedelta):
encoded = {
k: v for k, v in var.__dict__.items() if not k.startswith("_") and v
}
if var.weekday and var.weekday.n:
# Every n'th Friday for example
encoded["weekday"] = [var.weekday.weekday, var.weekday.n]
elif var.weekday:
encoded["weekday"] = [var.weekday.weekday]
return cls._encode(encoded, type_=DAT.RELATIVEDELTA)
elif callable(var):
return str(get_python_source(var))
elif isinstance(var, set):
# FIXME: casts set to list in customized serialization in future.
return cls._encode([cls._serialize(v) for v in var], type_=DAT.SET)
elif isinstance(var, tuple):
# FIXME: casts tuple to list in customized serialization in future.
return cls._encode([cls._serialize(v) for v in var], type_=DAT.TUPLE)
elif isinstance(var, TaskGroup):
return SerializedTaskGroup.serialize_task_group(var)
else:
log.debug("Cast type %s to str in serialization.", type(var))
return str(var)
except Exception: # pylint: disable=broad-except
log.error("Failed to stringify.", exc_info=True)
return FAILED
|
https://github.com/apache/airflow/issues/11556
|
$ airflow db check
Traceback (most recent call last):
File "/.../bin/airflow", line 8, in <module>
sys.exit(main())
File "/.../lib/python3.6/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/.../lib/python3.6/site-packages/airflow/cli/cli_parser.py", line 52, in command
func = import_string(import_path)
File "/.../lib/python3.6/site-packages/airflow/utils/module_loading.py", line 32, in import_string
module = import_module(module_path)
File "/.../lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/.../lib/python3.6/site-packages/airflow/cli/commands/db_command.py", line 24, in <module>
from airflow.utils import cli as cli_utils, db
File "/.../lib/python3.6/site-packages/airflow/utils/db.py", line 26, in <module>
from airflow.jobs.base_job import BaseJob # noqa: F401 # pylint: disable=unused-import
File "/.../lib/python3.6/site-packages/airflow/jobs/__init__.py", line 22, in <module>
import airflow.jobs.scheduler_job # noqa
File "/.../lib/python3.6/site-packages/airflow/jobs/scheduler_job.py", line 50, in <module>
from airflow.models.serialized_dag import SerializedDagModel
File "/.../lib/python3.6/site-packages/airflow/models/serialized_dag.py", line 35, in <module>
from airflow.serialization.serialized_objects import SerializedDAG
File "/.../lib/python3.6/site-packages/airflow/serialization/serialized_objects.py", line 37, in <module>
from airflow.kubernetes.pod_generator import PodGenerator
File "/.../lib/python3.6/site-packages/airflow/kubernetes/pod_generator.py", line 35, in <module>
from kubernetes.client import models as k8s
ModuleNotFoundError: No module named 'kubernetes'
|
ModuleNotFoundError
|
def _deserialize(cls, encoded_var: Any) -> Any: # pylint: disable=too-many-return-statements
"""Helper function of depth first search for deserialization."""
# JSON primitives (except for dict) are not encoded.
if cls._is_primitive(encoded_var):
return encoded_var
elif isinstance(encoded_var, list):
return [cls._deserialize(v) for v in encoded_var]
if not isinstance(encoded_var, dict):
raise ValueError(f"The encoded_var should be dict and is {type(encoded_var)}")
var = encoded_var[Encoding.VAR]
type_ = encoded_var[Encoding.TYPE]
if type_ == DAT.DICT:
return {k: cls._deserialize(v) for k, v in var.items()}
elif type_ == DAT.DAG:
return SerializedDAG.deserialize_dag(var)
elif type_ == DAT.OP:
return SerializedBaseOperator.deserialize_operator(var)
elif type_ == DAT.DATETIME:
return pendulum.from_timestamp(var)
elif type_ == DAT.POD:
if not HAS_KUBERNETES:
raise RuntimeError(
"Cannot deserialize POD objects without kubernetes libraries installed!"
)
pod = PodGenerator.deserialize_model_dict(var)
return pod
elif type_ == DAT.TIMEDELTA:
return datetime.timedelta(seconds=var)
elif type_ == DAT.TIMEZONE:
return Timezone(var)
elif type_ == DAT.RELATIVEDELTA:
if "weekday" in var:
var["weekday"] = relativedelta.weekday(*var["weekday"]) # type: ignore
return relativedelta.relativedelta(**var)
elif type_ == DAT.SET:
return {cls._deserialize(v) for v in var}
elif type_ == DAT.TUPLE:
return tuple([cls._deserialize(v) for v in var])
else:
raise TypeError("Invalid type {!s} in deserialization.".format(type_))
|
def _deserialize(cls, encoded_var: Any) -> Any: # pylint: disable=too-many-return-statements
"""Helper function of depth first search for deserialization."""
# JSON primitives (except for dict) are not encoded.
if cls._is_primitive(encoded_var):
return encoded_var
elif isinstance(encoded_var, list):
return [cls._deserialize(v) for v in encoded_var]
if not isinstance(encoded_var, dict):
raise ValueError(f"The encoded_var should be dict and is {type(encoded_var)}")
var = encoded_var[Encoding.VAR]
type_ = encoded_var[Encoding.TYPE]
if type_ == DAT.DICT:
return {k: cls._deserialize(v) for k, v in var.items()}
elif type_ == DAT.DAG:
return SerializedDAG.deserialize_dag(var)
elif type_ == DAT.OP:
return SerializedBaseOperator.deserialize_operator(var)
elif type_ == DAT.DATETIME:
return pendulum.from_timestamp(var)
elif type_ == DAT.POD:
pod = PodGenerator.deserialize_model_dict(var)
return pod
elif type_ == DAT.TIMEDELTA:
return datetime.timedelta(seconds=var)
elif type_ == DAT.TIMEZONE:
return Timezone(var)
elif type_ == DAT.RELATIVEDELTA:
if "weekday" in var:
var["weekday"] = relativedelta.weekday(*var["weekday"]) # type: ignore
return relativedelta.relativedelta(**var)
elif type_ == DAT.SET:
return {cls._deserialize(v) for v in var}
elif type_ == DAT.TUPLE:
return tuple([cls._deserialize(v) for v in var])
else:
raise TypeError("Invalid type {!s} in deserialization.".format(type_))
|
https://github.com/apache/airflow/issues/11556
|
$ airflow db check
Traceback (most recent call last):
File "/.../bin/airflow", line 8, in <module>
sys.exit(main())
File "/.../lib/python3.6/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/.../lib/python3.6/site-packages/airflow/cli/cli_parser.py", line 52, in command
func = import_string(import_path)
File "/.../lib/python3.6/site-packages/airflow/utils/module_loading.py", line 32, in import_string
module = import_module(module_path)
File "/.../lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/.../lib/python3.6/site-packages/airflow/cli/commands/db_command.py", line 24, in <module>
from airflow.utils import cli as cli_utils, db
File "/.../lib/python3.6/site-packages/airflow/utils/db.py", line 26, in <module>
from airflow.jobs.base_job import BaseJob # noqa: F401 # pylint: disable=unused-import
File "/.../lib/python3.6/site-packages/airflow/jobs/__init__.py", line 22, in <module>
import airflow.jobs.scheduler_job # noqa
File "/.../lib/python3.6/site-packages/airflow/jobs/scheduler_job.py", line 50, in <module>
from airflow.models.serialized_dag import SerializedDagModel
File "/.../lib/python3.6/site-packages/airflow/models/serialized_dag.py", line 35, in <module>
from airflow.serialization.serialized_objects import SerializedDAG
File "/.../lib/python3.6/site-packages/airflow/serialization/serialized_objects.py", line 37, in <module>
from airflow.kubernetes.pod_generator import PodGenerator
File "/.../lib/python3.6/site-packages/airflow/kubernetes/pod_generator.py", line 35, in <module>
from kubernetes.client import models as k8s
ModuleNotFoundError: No module named 'kubernetes'
|
ModuleNotFoundError
|
def wait_for_pipeline_state(
self,
pipeline_name: str,
pipeline_id: str,
instance_url: str,
namespace: str = "default",
success_states: Optional[List[str]] = None,
failure_states: Optional[List[str]] = None,
timeout: int = 5 * 60,
):
"""
Polls pipeline state and raises an exception if the state is one of
`failure_states` or the operation timeouted.
"""
failure_states = failure_states or FAILURE_STATES
success_states = success_states or SUCCESS_STATES
start_time = monotonic()
current_state = None
while monotonic() - start_time < timeout:
try:
current_state = self._get_workflow_state(
pipeline_name=pipeline_name,
pipeline_id=pipeline_id,
instance_url=instance_url,
namespace=namespace,
)
except AirflowException:
pass # Because the pipeline may not be visible in system yet
if current_state in success_states:
return
if current_state in failure_states:
raise AirflowException(
f"Pipeline {pipeline_name} state {current_state} is not "
f"one of {success_states}"
)
sleep(30)
# Time is up!
raise AirflowException(
f"Pipeline {pipeline_name} state {current_state} is not "
f"one of {success_states} after {timeout}s"
)
|
def wait_for_pipeline_state(
self,
pipeline_name: str,
pipeline_id: str,
instance_url: str,
namespace: str = "default",
success_states: Optional[List[str]] = None,
failure_states: Optional[List[str]] = None,
timeout: int = 5 * 60,
):
"""
Polls pipeline state and raises an exception if the state is one of
`failure_states` or the operation timeouted.
"""
failure_states = failure_states or FAILURE_STATES
success_states = success_states or SUCCESS_STATES
start_time = monotonic()
current_state = None
while monotonic() - start_time < timeout:
current_state = self._get_workflow_state(
pipeline_name=pipeline_name,
pipeline_id=pipeline_id,
instance_url=instance_url,
namespace=namespace,
)
if current_state in success_states:
return
if current_state in failure_states:
raise AirflowException(
f"Pipeline {pipeline_name} state {current_state} is not "
f"one of {success_states}"
)
sleep(30)
# Time is up!
raise AirflowException(
f"Pipeline {pipeline_name} state {current_state} is not "
f"one of {success_states} after {timeout}s"
)
|
https://github.com/apache/airflow/issues/10030
|
[2020-07-28 08:16:46,390] {taskinstance.py:1059} ERROR - Retrieving a pipeline state failed with code 404
Traceback (most recent call last)
File "/usr/local/lib/airflow/airflow/models/taskinstance.py", line 930, in _run_raw_tas
result = task_copy.execute(context=context
File "/usr/local/lib/airflow/airflow/providers/google/cloud/operators/datafusion.py", line 700, in execut
runtime_args=self.runtime_args
File "/usr/local/lib/airflow/airflow/providers/google/cloud/hooks/datafusion.py", line 498, in start_pipelin
instance_url=instance_url
File "/usr/local/lib/airflow/airflow/providers/google/cloud/hooks/datafusion.py", line 114, in wait_for_pipeline_stat
namespace=namespace
File "/usr/local/lib/airflow/airflow/providers/google/cloud/hooks/datafusion.py", line 442, in _get_workflow_stat
f"Retrieving a pipeline state failed with code {response.status}
airflow.exceptions.AirflowException: Retrieving a pipeline state failed with code 404
|
airflow.exceptions.AirflowException
|
def execute(self, context):
emr_hook = EmrHook(aws_conn_id=self.aws_conn_id)
emr = emr_hook.get_conn()
job_flow_id = self.job_flow_id or emr_hook.get_cluster_id_by_name(
self.job_flow_name, self.cluster_states
)
if not job_flow_id:
raise AirflowException("No cluster found for name: " + self.job_flow_name)
if self.do_xcom_push:
context["ti"].xcom_push(key="job_flow_id", value=job_flow_id)
self.log.info("Adding steps to %s", job_flow_id)
response = emr.add_job_flow_steps(JobFlowId=job_flow_id, Steps=self.steps)
if not response["ResponseMetadata"]["HTTPStatusCode"] == 200:
raise AirflowException("Adding steps failed: %s" % response)
else:
self.log.info("Steps %s added to JobFlow", response["StepIds"])
return response["StepIds"]
|
def execute(self, context):
emr = EmrHook(aws_conn_id=self.aws_conn_id).get_conn()
job_flow_id = self.job_flow_id
if not job_flow_id:
job_flow_id = emr.get_cluster_id_by_name(
self.job_flow_name, self.cluster_states
)
if self.do_xcom_push:
context["ti"].xcom_push(key="job_flow_id", value=job_flow_id)
self.log.info("Adding steps to %s", job_flow_id)
response = emr.add_job_flow_steps(JobFlowId=job_flow_id, Steps=self.steps)
if not response["ResponseMetadata"]["HTTPStatusCode"] == 200:
raise AirflowException("Adding steps failed: %s" % response)
else:
self.log.info("Steps %s added to JobFlow", response["StepIds"])
return response["StepIds"]
|
https://github.com/apache/airflow/issues/9127
|
[2020-06-03 18:05:06,862] {taskinstance.py:1145} ERROR - 'EMR' object has no attribute 'get_cluster_id_by_name'
Traceback (most recent call last):
File "/home/ubuntu/.pyenv/versions/3.7.7/envs/.venv_python377/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 983, in _run_raw_task
result = task_copy.execute(context=context)
File "/home/ubuntu/.pyenv/versions/3.7.7/envs/.venv_python377/lib/python3.7/site-packages/airflow/contrib/operators/emr_add_steps_operator.py", line 74, in execute
job_flow_id = emr.get_cluster_id_by_name(self.job_flow_name, self.cluster_states)
File "/home/ubuntu/.pyenv/versions/3.7.7/envs/.venv_python377/lib/python3.7/site-packages/botocore/client.py", line 575, in _getattr_
self._class.name_, item)
AttributeError: 'EMR' object has no attribute 'get_cluster_id_by_name'
[2020-06-03 18:05:06,864] {taskinstance.py:1202} INFO - Marking task as FAILED.dag_id=my_spark_job_dag_id, task_id=my_spark_job_emr_add_step_id, execution_date=20200603T180500, start_date=20200603T180506, end_date=20200603T180506
[2020-06-03 18:05:16,153] {logging_mixin.py:112} INFO - [2020-06-03 18:05:16,153] {local_task_job.py:103} INFO - Task exited with return code 1*
|
AttributeError
|
def trigger(self, session=None):
dag_id = request.values.get("dag_id")
origin = request.values.get("origin") or "/admin/"
if request.method == "GET":
return self.render(
"airflow/trigger.html", dag_id=dag_id, origin=origin, conf=""
)
dag = (
session.query(models.DagModel).filter(models.DagModel.dag_id == dag_id).first()
)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
conf = request.values.get("conf")
if conf:
try:
run_conf = json.loads(conf)
except ValueError:
flash("Invalid JSON configuration", "error")
return self.render(
"airflow/trigger.html",
dag_id=dag_id,
origin=origin,
conf=conf,
)
dag = dagbag.get_dag(dag_id)
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True,
)
flash("Triggered {}, it should start any moment now.".format(dag_id))
return redirect(origin)
|
def trigger(self, session=None):
dag_id = request.values.get("dag_id")
origin = request.values.get("origin") or "/admin/"
if request.method == "GET":
return self.render(
"airflow/trigger.html", dag_id=dag_id, origin=origin, conf=""
)
dag = (
session.query(models.DagModel).filter(models.DagModel.dag_id == dag_id).first()
)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
conf = request.values.get("conf")
if conf:
try:
run_conf = json.loads(conf)
except ValueError:
flash("Invalid JSON configuration", "error")
return self.render(
"airflow/trigger.html",
dag_id=dag_id,
origin=origin,
conf=conf,
)
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True,
)
flash("Triggered {}, it should start any moment now.".format(dag_id))
return redirect(origin)
|
https://github.com/apache/airflow/issues/8247
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/decorators.py", line 121, in wrapper
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/decorators.py", line 56, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/views.py", line 1050, in trigger
external_trigger=True
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/models/dag.py", line 1818, in create_dagrun
return self.get_dag().create_dagrun(run_id=run_id,
AttributeError: 'NoneType' object has no attribute 'create_dagrun'
|
AttributeError
|
def trigger(self, session=None):
dag_id = request.values.get("dag_id")
origin = request.values.get("origin") or url_for("Airflow.index")
if request.method == "GET":
return self.render_template(
"airflow/trigger.html", dag_id=dag_id, origin=origin, conf=""
)
dag = (
session.query(models.DagModel).filter(models.DagModel.dag_id == dag_id).first()
)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
conf = request.values.get("conf")
if conf:
try:
run_conf = json.loads(conf)
except ValueError:
flash("Invalid JSON configuration", "error")
return self.render_template(
"airflow/trigger.html", dag_id=dag_id, origin=origin, conf=conf
)
dag = dagbag.get_dag(dag_id)
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True,
)
flash("Triggered {}, it should start any moment now.".format(dag_id))
return redirect(origin)
|
def trigger(self, session=None):
dag_id = request.values.get("dag_id")
origin = request.values.get("origin") or url_for("Airflow.index")
if request.method == "GET":
return self.render_template(
"airflow/trigger.html", dag_id=dag_id, origin=origin, conf=""
)
dag = (
session.query(models.DagModel).filter(models.DagModel.dag_id == dag_id).first()
)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
conf = request.values.get("conf")
if conf:
try:
run_conf = json.loads(conf)
except ValueError:
flash("Invalid JSON configuration", "error")
return self.render_template(
"airflow/trigger.html", dag_id=dag_id, origin=origin, conf=conf
)
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True,
)
flash("Triggered {}, it should start any moment now.".format(dag_id))
return redirect(origin)
|
https://github.com/apache/airflow/issues/8247
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/decorators.py", line 121, in wrapper
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/decorators.py", line 56, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/views.py", line 1050, in trigger
external_trigger=True
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/models/dag.py", line 1818, in create_dagrun
return self.get_dag().create_dagrun(run_id=run_id,
AttributeError: 'NoneType' object has no attribute 'create_dagrun'
|
AttributeError
|
def classify_ode(eq, func=None, dict=False, ics=None, **kwargs):
r"""
Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve`
classifications for an ODE.
The tuple is ordered so that first item is the classification that
:py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In
general, classifications at the near the beginning of the list will
produce better solutions faster than those near the end, thought there are
always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a
different classification, use ``dsolve(ODE, func,
hint=<classification>)``. See also the
:py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints
you can use.
If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will
return a dictionary of ``hint:match`` expression terms. This is intended
for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that
because dictionaries are ordered arbitrarily, this will most likely not be
in the same order as the tuple.
You can get help on different hints by executing
``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint
without ``_Integral``.
See :py:data:`~sympy.solvers.ode.allhints` or the
:py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints
that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`.
Notes
=====
These are remarks on hint names.
``_Integral``
If a classification has ``_Integral`` at the end, it will return the
expression with an unevaluated :py:class:`~.Integral`
class in it. Note that a hint may do this anyway if
:py:meth:`~sympy.core.expr.Expr.integrate` cannot do the integral,
though just using an ``_Integral`` will do so much faster. Indeed, an
``_Integral`` hint will always be faster than its corresponding hint
without ``_Integral`` because
:py:meth:`~sympy.core.expr.Expr.integrate` is an expensive routine.
If :py:meth:`~sympy.solvers.ode.dsolve` hangs, it is probably because
:py:meth:`~sympy.core.expr.Expr.integrate` is hanging on a tough or
impossible integral. Try using an ``_Integral`` hint or
``all_Integral`` to get it return something.
Note that some hints do not have ``_Integral`` counterparts. This is
because :py:func:`~sympy.integrals.integrals.integrate` is not used in
solving the ODE for those method. For example, `n`\th order linear
homogeneous ODEs with constant coefficients do not require integration
to solve, so there is no
``nth_linear_homogeneous_constant_coeff_Integrate`` hint. You can
easily evaluate any unevaluated
:py:class:`~sympy.integrals.integrals.Integral`\s in an expression by
doing ``expr.doit()``.
Ordinals
Some hints contain an ordinal such as ``1st_linear``. This is to help
differentiate them from other hints, as well as from other methods
that may not be implemented yet. If a hint has ``nth`` in it, such as
the ``nth_linear`` hints, this means that the method used to applies
to ODEs of any order.
``indep`` and ``dep``
Some hints contain the words ``indep`` or ``dep``. These reference
the independent variable and the dependent function, respectively. For
example, if an ODE is in terms of `f(x)`, then ``indep`` will refer to
`x` and ``dep`` will refer to `f`.
``subs``
If a hints has the word ``subs`` in it, it means the the ODE is solved
by substituting the expression given after the word ``subs`` for a
single dummy variable. This is usually in terms of ``indep`` and
``dep`` as above. The substituted expression will be written only in
characters allowed for names of Python objects, meaning operators will
be spelled out. For example, ``indep``/``dep`` will be written as
``indep_div_dep``.
``coeff``
The word ``coeff`` in a hint refers to the coefficients of something
in the ODE, usually of the derivative terms. See the docstring for
the individual methods for more info (``help(ode)``). This is
contrast to ``coefficients``, as in ``undetermined_coefficients``,
which refers to the common name of a method.
``_best``
Methods that have more than one fundamental way to solve will have a
hint for each sub-method and a ``_best`` meta-classification. This
will evaluate all hints and return the best, using the same
considerations as the normal ``best`` meta-hint.
Examples
========
>>> from sympy import Function, classify_ode, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> classify_ode(Eq(f(x).diff(x), 0), f(x))
('nth_algebraic', 'separable', '1st_linear', '1st_homogeneous_coeff_best',
'1st_homogeneous_coeff_subs_indep_div_dep',
'1st_homogeneous_coeff_subs_dep_div_indep',
'1st_power_series', 'lie_group',
'nth_linear_constant_coeff_homogeneous',
'nth_linear_euler_eq_homogeneous', 'nth_algebraic_Integral',
'separable_Integral', '1st_linear_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral')
>>> classify_ode(f(x).diff(x, 2) + 3*f(x).diff(x) + 2*f(x) - 4)
('nth_linear_constant_coeff_undetermined_coefficients',
'nth_linear_constant_coeff_variation_of_parameters',
'nth_linear_constant_coeff_variation_of_parameters_Integral')
"""
ics = sympify(ics)
prep = kwargs.pop("prep", True)
if func and len(func.args) != 1:
raise ValueError(
"dsolve() and classify_ode() only "
"work with functions of one variable, not %s" % func
)
# Some methods want the unprocessed equation
eq_orig = eq
if prep or func is None:
eq, func_ = _preprocess(eq, func)
if func is None:
func = func_
x = func.args[0]
f = func.func
y = Dummy("y")
xi = kwargs.get("xi")
eta = kwargs.get("eta")
terms = kwargs.get("n")
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_ode(
eq.lhs - eq.rhs,
func,
dict=dict,
ics=ics,
xi=xi,
n=terms,
eta=eta,
prep=False,
)
eq = eq.lhs
order = ode_order(eq, f(x))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {"order": order}
df = f(x).diff(x)
a = Wild("a", exclude=[f(x)])
b = Wild("b", exclude=[f(x)])
c = Wild("c", exclude=[f(x)])
d = Wild("d", exclude=[df, f(x).diff(x, 2)])
e = Wild("e", exclude=[df])
k = Wild("k", exclude=[df])
n = Wild("n", exclude=[x, f(x), df])
c1 = Wild("c1", exclude=[x])
a2 = Wild("a2", exclude=[x, f(x), df])
b2 = Wild("b2", exclude=[x, f(x), df])
c2 = Wild("c2", exclude=[x, f(x), df])
d2 = Wild("d2", exclude=[x, f(x), df])
a3 = Wild("a3", exclude=[f(x), df, f(x).diff(x, 2)])
b3 = Wild("b3", exclude=[f(x), df, f(x).diff(x, 2)])
c3 = Wild("c3", exclude=[f(x), df, f(x).diff(x, 2)])
r3 = {"xi": xi, "eta": eta} # Used for the lie_group hint
boundary = {} # Used to extract initial conditions
C1 = Symbol("C1")
# Preprocessing to get the initial conditions out
if ics is not None:
for funcarg in ics:
# Separating derivatives
if isinstance(funcarg, (Subs, Derivative)):
# f(x).diff(x).subs(x, 0) is a Subs, but f(x).diff(x).subs(x,
# y) is a Derivative
if isinstance(funcarg, Subs):
deriv = funcarg.expr
old = funcarg.variables[0]
new = funcarg.point[0]
elif isinstance(funcarg, Derivative):
deriv = funcarg
# No information on this. Just assume it was x
old = x
new = funcarg.variables[0]
if (
isinstance(deriv, Derivative)
and isinstance(deriv.args[0], AppliedUndef)
and deriv.args[0].func == f
and len(deriv.args[0].args) == 1
and old == x
and not new.has(x)
and all(i == deriv.variables[0] for i in deriv.variables)
and not ics[funcarg].has(f)
):
dorder = ode_order(deriv, x)
temp = "f" + str(dorder)
boundary.update({temp: new, temp + "val": ics[funcarg]})
else:
raise ValueError("Enter valid boundary conditions for Derivatives")
# Separating functions
elif isinstance(funcarg, AppliedUndef):
if (
funcarg.func == f
and len(funcarg.args) == 1
and not funcarg.args[0].has(x)
and not ics[funcarg].has(f)
):
boundary.update({"f0": funcarg.args[0], "f0val": ics[funcarg]})
else:
raise ValueError("Enter valid boundary conditions for Function")
else:
raise ValueError(
"Enter boundary conditions of the form ics={f(point}: value, f(x).diff(x, order).subs(x, point): value}"
)
# Factorable method
r = _ode_factorable_match(eq, func, kwargs.get("x0", 0))
if r:
matching_hints["factorable"] = r
# Any ODE that can be solved with a combination of algebra and
# integrals e.g.:
# d^3/dx^3(x y) = F(x)
r = _nth_algebraic_match(eq_orig, func)
if r["solutions"]:
matching_hints["nth_algebraic"] = r
matching_hints["nth_algebraic_Integral"] = r
eq = expand(eq)
# Precondition to try remove f(x) from highest order derivative
reduced_eq = None
if eq.is_Add:
deriv_coef = eq.coeff(f(x).diff(x, order))
if deriv_coef not in (1, 0):
r = deriv_coef.match(a * f(x) ** c1)
if r and r[c1]:
den = f(x) ** r[c1]
reduced_eq = Add(*[arg / den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
## Linear case: a(x)*y'+b(x)*y+c(x) == 0
if eq.is_Add:
ind, dep = reduced_eq.as_independent(f)
else:
u = Dummy("u")
ind, dep = (reduced_eq + u).as_independent(f)
ind, dep = [tmp.subs(u, 0) for tmp in [ind, dep]]
r = {a: dep.coeff(df), b: dep.coeff(f(x)), c: ind}
# double check f[a] since the preconditioning may have failed
if (
not r[a].has(f)
and not r[b].has(f)
and (r[a] * df + r[b] * f(x) + r[c]).expand() - reduced_eq == 0
):
r["a"] = a
r["b"] = b
r["c"] = c
matching_hints["1st_linear"] = r
matching_hints["1st_linear_Integral"] = r
## Bernoulli case: a(x)*y'+b(x)*y+c(x)*y**n == 0
r = collect(reduced_eq, f(x), exact=True).match(
a * df + b * f(x) + c * f(x) ** n
)
if r and r[c] != 0 and r[n] != 1: # See issue 4676
r["a"] = a
r["b"] = b
r["c"] = c
r["n"] = n
matching_hints["Bernoulli"] = r
matching_hints["Bernoulli_Integral"] = r
## Riccati special n == -2 case: a2*y'+b2*y**2+c2*y/x+d2/x**2 == 0
r = collect(reduced_eq, f(x), exact=True).match(
a2 * df + b2 * f(x) ** 2 + c2 * f(x) / x + d2 / x**2
)
if r and r[b2] != 0 and (r[c2] != 0 or r[d2] != 0):
r["a2"] = a2
r["b2"] = b2
r["c2"] = c2
r["d2"] = d2
matching_hints["Riccati_special_minus2"] = r
# NON-REDUCED FORM OF EQUATION matches
r = collect(eq, df, exact=True).match(d + e * df)
if r:
r["d"] = d
r["e"] = e
r["y"] = y
r[d] = r[d].subs(f(x), y)
r[e] = r[e].subs(f(x), y)
# FIRST ORDER POWER SERIES WHICH NEEDS INITIAL CONDITIONS
# TODO: Hint first order series should match only if d/e is analytic.
# For now, only d/e and (d/e).diff(arg) is checked for existence at
# at a given point.
# This is currently done internally in ode_1st_power_series.
point = boundary.get("f0", 0)
value = boundary.get("f0val", C1)
check = cancel(r[d] / r[e])
check1 = check.subs({x: point, y: value})
if (
not check1.has(oo)
and not check1.has(zoo)
and not check1.has(NaN)
and not check1.has(-oo)
):
check2 = (check1.diff(x)).subs({x: point, y: value})
if (
not check2.has(oo)
and not check2.has(zoo)
and not check2.has(NaN)
and not check2.has(-oo)
):
rseries = r.copy()
rseries.update({"terms": terms, "f0": point, "f0val": value})
matching_hints["1st_power_series"] = rseries
r3.update(r)
## Exact Differential Equation: P(x, y) + Q(x, y)*y' = 0 where
# dP/dy == dQ/dx
try:
if r[d] != 0:
numerator = simplify(r[d].diff(y) - r[e].diff(x))
# The following few conditions try to convert a non-exact
# differential equation into an exact one.
# References : Differential equations with applications
# and historical notes - George E. Simmons
if numerator:
# If (dP/dy - dQ/dx) / Q = f(x)
# then exp(integral(f(x))*equation becomes exact
factor = simplify(numerator / r[e])
variables = factor.free_symbols
if len(variables) == 1 and x == variables.pop():
factor = exp(Integral(factor).doit())
r[d] *= factor
r[e] *= factor
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
else:
# If (dP/dy - dQ/dx) / -P = f(y)
# then exp(integral(f(y))*equation becomes exact
factor = simplify(-numerator / r[d])
variables = factor.free_symbols
if len(variables) == 1 and y == variables.pop():
factor = exp(Integral(factor).doit())
r[d] *= factor
r[e] *= factor
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
else:
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
except NotImplementedError:
# Differentiating the coefficients might fail because of things
# like f(2*x).diff(x). See issue 4624 and issue 4719.
pass
# Any first order ODE can be ideally solved by the Lie Group
# method
matching_hints["lie_group"] = r3
# This match is used for several cases below; we now collect on
# f(x) so the matching works.
r = collect(reduced_eq, df, exact=True).match(d + e * df)
if r is None and "factorable" not in matching_hints:
roots = solve(reduced_eq, df)
if roots:
meq = Mul(*[(df - i) for i in roots]) * Dummy()
m = _ode_factorable_match(meq, func, kwargs.get("x0", 0))
matching_hints["factorable"] = m
if r:
# Using r[d] and r[e] without any modification for hints
# linear-coefficients and separable-reduced.
num, den = r[d], r[e] # ODE = d/e + df
r["d"] = d
r["e"] = e
r["y"] = y
r[d] = num.subs(f(x), y)
r[e] = den.subs(f(x), y)
## Separable Case: y' == P(y)*Q(x)
r[d] = separatevars(r[d])
r[e] = separatevars(r[e])
# m1[coeff]*m1[x]*m1[y] + m2[coeff]*m2[x]*m2[y]*y'
m1 = separatevars(r[d], dict=True, symbols=(x, y))
m2 = separatevars(r[e], dict=True, symbols=(x, y))
if m1 and m2:
r1 = {"m1": m1, "m2": m2, "y": y}
matching_hints["separable"] = r1
matching_hints["separable_Integral"] = r1
## First order equation with homogeneous coefficients:
# dy/dx == F(y/x) or dy/dx == F(x/y)
ordera = homogeneous_order(r[d], x, y)
if ordera is not None:
orderb = homogeneous_order(r[e], x, y)
if ordera == orderb:
# u1=y/x and u2=x/y
u1 = Dummy("u1")
u2 = Dummy("u2")
s = "1st_homogeneous_coeff_subs"
s1 = s + "_dep_div_indep"
s2 = s + "_indep_div_dep"
if simplify((r[d] + u1 * r[e]).subs({x: 1, y: u1})) != 0:
matching_hints[s1] = r
matching_hints[s1 + "_Integral"] = r
if simplify((r[e] + u2 * r[d]).subs({x: u2, y: 1})) != 0:
matching_hints[s2] = r
matching_hints[s2 + "_Integral"] = r
if s1 in matching_hints and s2 in matching_hints:
matching_hints["1st_homogeneous_coeff_best"] = r
## Linear coefficients of the form
# y'+ F((a*x + b*y + c)/(a'*x + b'y + c')) = 0
# that can be reduced to homogeneous form.
F = num / den
params = _linear_coeff_match(F, func)
if params:
xarg, yarg = params
u = Dummy("u")
t = Dummy("t")
# Dummy substitution for df and f(x).
dummy_eq = reduced_eq.subs(((df, t), (f(x), u)))
reps = ((x, x + xarg), (u, u + yarg), (t, df), (u, f(x)))
dummy_eq = simplify(dummy_eq.subs(reps))
# get the re-cast values for e and d
r2 = collect(expand(dummy_eq), [df, f(x)]).match(e * df + d)
if r2:
orderd = homogeneous_order(r2[d], x, f(x))
if orderd is not None:
ordere = homogeneous_order(r2[e], x, f(x))
if orderd == ordere:
# Match arguments are passed in such a way that it
# is coherent with the already existing homogeneous
# functions.
r2[d] = r2[d].subs(f(x), y)
r2[e] = r2[e].subs(f(x), y)
r2.update(
{"xarg": xarg, "yarg": yarg, "d": d, "e": e, "y": y}
)
matching_hints["linear_coefficients"] = r2
matching_hints["linear_coefficients_Integral"] = r2
## Equation of the form y' + (y/x)*H(x^n*y) = 0
# that can be reduced to separable form
factor = simplify(x / f(x) * num / den)
# Try representing factor in terms of x^n*y
# where n is lowest power of x in factor;
# first remove terms like sqrt(2)*3 from factor.atoms(Mul)
u = None
for mul in ordered(factor.atoms(Mul)):
if mul.has(x):
_, u = mul.as_independent(x, f(x))
break
if u and u.has(f(x)):
h = x ** (degree(Poly(u.subs(f(x), y), gen=x))) * f(x)
p = Wild("p")
if (u / h == 1) or ((u / h).simplify().match(x**p)):
t = Dummy("t")
r2 = {"t": t}
xpart, ypart = u.as_independent(f(x))
test = factor.subs(((u, t), (1 / u, 1 / t)))
free = test.free_symbols
if len(free) == 1 and free.pop() == t:
r2.update({"power": xpart.as_base_exp()[1], "u": test})
matching_hints["separable_reduced"] = r2
matching_hints["separable_reduced_Integral"] = r2
## Almost-linear equation of the form f(x)*g(y)*y' + k(x)*l(y) + m(x) = 0
r = collect(eq, [df, f(x)]).match(e * df + d)
if r:
r2 = r.copy()
r2[c] = S.Zero
if r2[d].is_Add:
# Separate the terms having f(x) to r[d] and
# remaining to r[c]
no_f, r2[d] = r2[d].as_independent(f(x))
r2[c] += no_f
factor = simplify(r2[d].diff(f(x)) / r[e])
if factor and not factor.has(f(x)):
r2[d] = factor_terms(r2[d])
u = r2[d].as_independent(f(x), as_Add=False)[1]
r2.update({"a": e, "b": d, "c": c, "u": u})
r2[d] /= u
r2[e] /= u.diff(f(x))
matching_hints["almost_linear"] = r2
matching_hints["almost_linear_Integral"] = r2
elif order == 2:
# Liouville ODE in the form
# f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
# See Goldstein and Braun, "Advanced Methods for the Solution of
# Differential Equations", pg. 98
s = d * f(x).diff(x, 2) + e * df**2 + k * df
r = reduced_eq.match(s)
if r and r[d] != 0:
y = Dummy("y")
g = simplify(r[e] / r[d]).subs(f(x), y)
h = simplify(r[k] / r[d]).subs(f(x), y)
if y in h.free_symbols or x in g.free_symbols:
pass
else:
r = {"g": g, "h": h, "y": y}
matching_hints["Liouville"] = r
matching_hints["Liouville_Integral"] = r
# Homogeneous second order differential equation of the form
# a3*f(x).diff(x, 2) + b3*f(x).diff(x) + c3
# It has a definite power series solution at point x0 if, b3/a3 and c3/a3
# are analytic at x0.
deq = a3 * (f(x).diff(x, 2)) + b3 * df + c3 * f(x)
r = collect(reduced_eq, [f(x).diff(x, 2), f(x).diff(x), f(x)]).match(deq)
ordinary = False
if r:
if not all([r[key].is_polynomial() for key in r]):
n, d = reduced_eq.as_numer_denom()
reduced_eq = expand(n)
r = collect(reduced_eq, [f(x).diff(x, 2), f(x).diff(x), f(x)]).match(
deq
)
if r and r[a3] != 0:
p = cancel(r[b3] / r[a3]) # Used below
q = cancel(r[c3] / r[a3]) # Used below
point = kwargs.get("x0", 0)
check = p.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
check = q.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
ordinary = True
r.update(
{"a3": a3, "b3": b3, "c3": c3, "x0": point, "terms": terms}
)
matching_hints["2nd_power_series_ordinary"] = r
# Checking if the differential equation has a regular singular point
# at x0. It has a regular singular point at x0, if (b3/a3)*(x - x0)
# and (c3/a3)*((x - x0)**2) are analytic at x0.
if not ordinary:
p = cancel((x - point) * p)
check = p.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
q = cancel(((x - point) ** 2) * q)
check = q.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
coeff_dict = {"p": p, "q": q, "x0": point, "terms": terms}
matching_hints["2nd_power_series_regular"] = coeff_dict
# For Hypergeometric solutions.
_r = {}
_r.update(r)
rn = match_2nd_hypergeometric(_r, func)
if rn:
matching_hints["2nd_hypergeometric"] = rn
matching_hints["2nd_hypergeometric_Integral"] = rn
# If the ODE has regular singular point at x0 and is of the form
# Eq((x)**2*Derivative(y(x), x, x) + x*Derivative(y(x), x) +
# (a4**2*x**(2*p)-n**2)*y(x) thus Bessel's equation
rn = match_2nd_linear_bessel(r, f(x))
if rn:
matching_hints["2nd_linear_bessel"] = rn
# If the ODE is ordinary and is of the form of Airy's Equation
# Eq(x**2*Derivative(y(x),x,x)-(ax+b)*y(x))
if p.is_zero:
a4 = Wild("a4", exclude=[x, f(x), df])
b4 = Wild("b4", exclude=[x, f(x), df])
rn = q.match(a4 + b4 * x)
if rn and rn[b4] != 0:
rn = {"b": rn[a4], "m": rn[b4]}
matching_hints["2nd_linear_airy"] = rn
if order > 0:
# Any ODE that can be solved with a substitution and
# repeated integration e.g.:
# `d^2/dx^2(y) + x*d/dx(y) = constant
# f'(x) must be finite for this to work
r = _nth_order_reducible_match(reduced_eq, func)
if r:
matching_hints["nth_order_reducible"] = r
# nth order linear ODE
# a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y = F(x) = b
r = _nth_linear_match(reduced_eq, func, order)
# Constant coefficient case (a_i is constant for all i)
if r and not any(r[i].has(x) for i in r if i >= 0):
# Inhomogeneous case: F(x) is not identically 0
if r[-1]:
eq_homogeneous = Add(eq, -r[-1])
undetcoeff = _undetermined_coefficients_match(
r[-1], x, func, eq_homogeneous
)
s = "nth_linear_constant_coeff_variation_of_parameters"
matching_hints[s] = r
matching_hints[s + "_Integral"] = r
if undetcoeff["test"]:
r["trialset"] = undetcoeff["trialset"]
matching_hints[
"nth_linear_constant_coeff_undetermined_coefficients"
] = r
# Homogeneous case: F(x) is identically 0
else:
matching_hints["nth_linear_constant_coeff_homogeneous"] = r
# nth order Euler equation a_n*x**n*y^(n) + ... + a_1*x*y' + a_0*y = F(x)
# In case of Homogeneous euler equation F(x) = 0
def _test_term(coeff, order):
r"""
Linear Euler ODEs have the form K*x**order*diff(y(x),x,order) = F(x),
where K is independent of x and y(x), order>= 0.
So we need to check that for each term, coeff == K*x**order from
some K. We have a few cases, since coeff may have several
different types.
"""
if order < 0:
raise ValueError("order should be greater than 0")
if coeff == 0:
return True
if order == 0:
if x in coeff.free_symbols:
return False
return True
if coeff.is_Mul:
if coeff.has(f(x)):
return False
return x**order in coeff.args
elif coeff.is_Pow:
return coeff.as_base_exp() == (x, order)
elif order == 1:
return x == coeff
return False
# Find coefficient for highest derivative, multiply coefficients to
# bring the equation into Euler form if possible
r_rescaled = None
if r is not None:
coeff = r[order]
factor = x**order / coeff
r_rescaled = {i: factor * r[i] for i in r if i != "trialset"}
# XXX: Mixing up the trialset with the coefficients is error-prone.
# These should be separated as something like r['coeffs'] and
# r['trialset']
if r_rescaled and not any(
not _test_term(r_rescaled[i], i)
for i in r_rescaled
if i != "trialset" and i >= 0
):
if not r_rescaled[-1]:
matching_hints["nth_linear_euler_eq_homogeneous"] = r_rescaled
else:
matching_hints[
"nth_linear_euler_eq_nonhomogeneous_variation_of_parameters"
] = r_rescaled
matching_hints[
"nth_linear_euler_eq_nonhomogeneous_variation_of_parameters_Integral"
] = r_rescaled
e, re = posify(r_rescaled[-1].subs(x, exp(x)))
undetcoeff = _undetermined_coefficients_match(e.subs(re), x)
if undetcoeff["test"]:
r_rescaled["trialset"] = undetcoeff["trialset"]
matching_hints[
"nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients"
] = r_rescaled
# Order keys based on allhints.
retlist = [i for i in allhints if i in matching_hints]
if dict:
# Dictionaries are ordered arbitrarily, so make note of which
# hint would come first for dsolve(). Use an ordered dict in Py 3.
matching_hints["default"] = retlist[0] if retlist else None
matching_hints["ordered_hints"] = tuple(retlist)
return matching_hints
else:
return tuple(retlist)
|
def classify_ode(eq, func=None, dict=False, ics=None, **kwargs):
r"""
Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve`
classifications for an ODE.
The tuple is ordered so that first item is the classification that
:py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In
general, classifications at the near the beginning of the list will
produce better solutions faster than those near the end, thought there are
always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a
different classification, use ``dsolve(ODE, func,
hint=<classification>)``. See also the
:py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints
you can use.
If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will
return a dictionary of ``hint:match`` expression terms. This is intended
for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that
because dictionaries are ordered arbitrarily, this will most likely not be
in the same order as the tuple.
You can get help on different hints by executing
``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint
without ``_Integral``.
See :py:data:`~sympy.solvers.ode.allhints` or the
:py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints
that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`.
Notes
=====
These are remarks on hint names.
``_Integral``
If a classification has ``_Integral`` at the end, it will return the
expression with an unevaluated :py:class:`~.Integral`
class in it. Note that a hint may do this anyway if
:py:meth:`~sympy.core.expr.Expr.integrate` cannot do the integral,
though just using an ``_Integral`` will do so much faster. Indeed, an
``_Integral`` hint will always be faster than its corresponding hint
without ``_Integral`` because
:py:meth:`~sympy.core.expr.Expr.integrate` is an expensive routine.
If :py:meth:`~sympy.solvers.ode.dsolve` hangs, it is probably because
:py:meth:`~sympy.core.expr.Expr.integrate` is hanging on a tough or
impossible integral. Try using an ``_Integral`` hint or
``all_Integral`` to get it return something.
Note that some hints do not have ``_Integral`` counterparts. This is
because :py:func:`~sympy.integrals.integrals.integrate` is not used in
solving the ODE for those method. For example, `n`\th order linear
homogeneous ODEs with constant coefficients do not require integration
to solve, so there is no
``nth_linear_homogeneous_constant_coeff_Integrate`` hint. You can
easily evaluate any unevaluated
:py:class:`~sympy.integrals.integrals.Integral`\s in an expression by
doing ``expr.doit()``.
Ordinals
Some hints contain an ordinal such as ``1st_linear``. This is to help
differentiate them from other hints, as well as from other methods
that may not be implemented yet. If a hint has ``nth`` in it, such as
the ``nth_linear`` hints, this means that the method used to applies
to ODEs of any order.
``indep`` and ``dep``
Some hints contain the words ``indep`` or ``dep``. These reference
the independent variable and the dependent function, respectively. For
example, if an ODE is in terms of `f(x)`, then ``indep`` will refer to
`x` and ``dep`` will refer to `f`.
``subs``
If a hints has the word ``subs`` in it, it means the the ODE is solved
by substituting the expression given after the word ``subs`` for a
single dummy variable. This is usually in terms of ``indep`` and
``dep`` as above. The substituted expression will be written only in
characters allowed for names of Python objects, meaning operators will
be spelled out. For example, ``indep``/``dep`` will be written as
``indep_div_dep``.
``coeff``
The word ``coeff`` in a hint refers to the coefficients of something
in the ODE, usually of the derivative terms. See the docstring for
the individual methods for more info (``help(ode)``). This is
contrast to ``coefficients``, as in ``undetermined_coefficients``,
which refers to the common name of a method.
``_best``
Methods that have more than one fundamental way to solve will have a
hint for each sub-method and a ``_best`` meta-classification. This
will evaluate all hints and return the best, using the same
considerations as the normal ``best`` meta-hint.
Examples
========
>>> from sympy import Function, classify_ode, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> classify_ode(Eq(f(x).diff(x), 0), f(x))
('nth_algebraic', 'separable', '1st_linear', '1st_homogeneous_coeff_best',
'1st_homogeneous_coeff_subs_indep_div_dep',
'1st_homogeneous_coeff_subs_dep_div_indep',
'1st_power_series', 'lie_group',
'nth_linear_constant_coeff_homogeneous',
'nth_linear_euler_eq_homogeneous', 'nth_algebraic_Integral',
'separable_Integral', '1st_linear_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral')
>>> classify_ode(f(x).diff(x, 2) + 3*f(x).diff(x) + 2*f(x) - 4)
('nth_linear_constant_coeff_undetermined_coefficients',
'nth_linear_constant_coeff_variation_of_parameters',
'nth_linear_constant_coeff_variation_of_parameters_Integral')
"""
ics = sympify(ics)
prep = kwargs.pop("prep", True)
if func and len(func.args) != 1:
raise ValueError(
"dsolve() and classify_ode() only "
"work with functions of one variable, not %s" % func
)
# Some methods want the unprocessed equation
eq_orig = eq
if prep or func is None:
eq, func_ = _preprocess(eq, func)
if func is None:
func = func_
x = func.args[0]
f = func.func
y = Dummy("y")
xi = kwargs.get("xi")
eta = kwargs.get("eta")
terms = kwargs.get("n")
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_ode(
eq.lhs - eq.rhs,
func,
dict=dict,
ics=ics,
xi=xi,
n=terms,
eta=eta,
prep=False,
)
eq = eq.lhs
order = ode_order(eq, f(x))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {"order": order}
df = f(x).diff(x)
a = Wild("a", exclude=[f(x)])
b = Wild("b", exclude=[f(x)])
c = Wild("c", exclude=[f(x)])
d = Wild("d", exclude=[df, f(x).diff(x, 2)])
e = Wild("e", exclude=[df])
k = Wild("k", exclude=[df])
n = Wild("n", exclude=[x, f(x), df])
c1 = Wild("c1", exclude=[x])
a2 = Wild("a2", exclude=[x, f(x), df])
b2 = Wild("b2", exclude=[x, f(x), df])
c2 = Wild("c2", exclude=[x, f(x), df])
d2 = Wild("d2", exclude=[x, f(x), df])
a3 = Wild("a3", exclude=[f(x), df, f(x).diff(x, 2)])
b3 = Wild("b3", exclude=[f(x), df, f(x).diff(x, 2)])
c3 = Wild("c3", exclude=[f(x), df, f(x).diff(x, 2)])
r3 = {"xi": xi, "eta": eta} # Used for the lie_group hint
boundary = {} # Used to extract initial conditions
C1 = Symbol("C1")
# Preprocessing to get the initial conditions out
if ics is not None:
for funcarg in ics:
# Separating derivatives
if isinstance(funcarg, (Subs, Derivative)):
# f(x).diff(x).subs(x, 0) is a Subs, but f(x).diff(x).subs(x,
# y) is a Derivative
if isinstance(funcarg, Subs):
deriv = funcarg.expr
old = funcarg.variables[0]
new = funcarg.point[0]
elif isinstance(funcarg, Derivative):
deriv = funcarg
# No information on this. Just assume it was x
old = x
new = funcarg.variables[0]
if (
isinstance(deriv, Derivative)
and isinstance(deriv.args[0], AppliedUndef)
and deriv.args[0].func == f
and len(deriv.args[0].args) == 1
and old == x
and not new.has(x)
and all(i == deriv.variables[0] for i in deriv.variables)
and not ics[funcarg].has(f)
):
dorder = ode_order(deriv, x)
temp = "f" + str(dorder)
boundary.update({temp: new, temp + "val": ics[funcarg]})
else:
raise ValueError("Enter valid boundary conditions for Derivatives")
# Separating functions
elif isinstance(funcarg, AppliedUndef):
if (
funcarg.func == f
and len(funcarg.args) == 1
and not funcarg.args[0].has(x)
and not ics[funcarg].has(f)
):
boundary.update({"f0": funcarg.args[0], "f0val": ics[funcarg]})
else:
raise ValueError("Enter valid boundary conditions for Function")
else:
raise ValueError(
"Enter boundary conditions of the form ics={f(point}: value, f(x).diff(x, order).subs(x, point): value}"
)
# Factorable method
r = _ode_factorable_match(eq, func, kwargs.get("x0", 0))
if r:
matching_hints["factorable"] = r
# Any ODE that can be solved with a combination of algebra and
# integrals e.g.:
# d^3/dx^3(x y) = F(x)
r = _nth_algebraic_match(eq_orig, func)
if r["solutions"]:
matching_hints["nth_algebraic"] = r
matching_hints["nth_algebraic_Integral"] = r
eq = expand(eq)
# Precondition to try remove f(x) from highest order derivative
reduced_eq = None
if eq.is_Add:
deriv_coef = eq.coeff(f(x).diff(x, order))
if deriv_coef not in (1, 0):
r = deriv_coef.match(a * f(x) ** c1)
if r and r[c1]:
den = f(x) ** r[c1]
reduced_eq = Add(*[arg / den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
## Linear case: a(x)*y'+b(x)*y+c(x) == 0
if eq.is_Add:
ind, dep = reduced_eq.as_independent(f)
else:
u = Dummy("u")
ind, dep = (reduced_eq + u).as_independent(f)
ind, dep = [tmp.subs(u, 0) for tmp in [ind, dep]]
r = {a: dep.coeff(df), b: dep.coeff(f(x)), c: ind}
# double check f[a] since the preconditioning may have failed
if (
not r[a].has(f)
and not r[b].has(f)
and (r[a] * df + r[b] * f(x) + r[c]).expand() - reduced_eq == 0
):
r["a"] = a
r["b"] = b
r["c"] = c
matching_hints["1st_linear"] = r
matching_hints["1st_linear_Integral"] = r
## Bernoulli case: a(x)*y'+b(x)*y+c(x)*y**n == 0
r = collect(reduced_eq, f(x), exact=True).match(
a * df + b * f(x) + c * f(x) ** n
)
if r and r[c] != 0 and r[n] != 1: # See issue 4676
r["a"] = a
r["b"] = b
r["c"] = c
r["n"] = n
matching_hints["Bernoulli"] = r
matching_hints["Bernoulli_Integral"] = r
## Riccati special n == -2 case: a2*y'+b2*y**2+c2*y/x+d2/x**2 == 0
r = collect(reduced_eq, f(x), exact=True).match(
a2 * df + b2 * f(x) ** 2 + c2 * f(x) / x + d2 / x**2
)
if r and r[b2] != 0 and (r[c2] != 0 or r[d2] != 0):
r["a2"] = a2
r["b2"] = b2
r["c2"] = c2
r["d2"] = d2
matching_hints["Riccati_special_minus2"] = r
# NON-REDUCED FORM OF EQUATION matches
r = collect(eq, df, exact=True).match(d + e * df)
if r:
r["d"] = d
r["e"] = e
r["y"] = y
r[d] = r[d].subs(f(x), y)
r[e] = r[e].subs(f(x), y)
# FIRST ORDER POWER SERIES WHICH NEEDS INITIAL CONDITIONS
# TODO: Hint first order series should match only if d/e is analytic.
# For now, only d/e and (d/e).diff(arg) is checked for existence at
# at a given point.
# This is currently done internally in ode_1st_power_series.
point = boundary.get("f0", 0)
value = boundary.get("f0val", C1)
check = cancel(r[d] / r[e])
check1 = check.subs({x: point, y: value})
if (
not check1.has(oo)
and not check1.has(zoo)
and not check1.has(NaN)
and not check1.has(-oo)
):
check2 = (check1.diff(x)).subs({x: point, y: value})
if (
not check2.has(oo)
and not check2.has(zoo)
and not check2.has(NaN)
and not check2.has(-oo)
):
rseries = r.copy()
rseries.update({"terms": terms, "f0": point, "f0val": value})
matching_hints["1st_power_series"] = rseries
r3.update(r)
## Exact Differential Equation: P(x, y) + Q(x, y)*y' = 0 where
# dP/dy == dQ/dx
try:
if r[d] != 0:
numerator = simplify(r[d].diff(y) - r[e].diff(x))
# The following few conditions try to convert a non-exact
# differential equation into an exact one.
# References : Differential equations with applications
# and historical notes - George E. Simmons
if numerator:
# If (dP/dy - dQ/dx) / Q = f(x)
# then exp(integral(f(x))*equation becomes exact
factor = simplify(numerator / r[e])
variables = factor.free_symbols
if len(variables) == 1 and x == variables.pop():
factor = exp(Integral(factor).doit())
r[d] *= factor
r[e] *= factor
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
else:
# If (dP/dy - dQ/dx) / -P = f(y)
# then exp(integral(f(y))*equation becomes exact
factor = simplify(-numerator / r[d])
variables = factor.free_symbols
if len(variables) == 1 and y == variables.pop():
factor = exp(Integral(factor).doit())
r[d] *= factor
r[e] *= factor
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
else:
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
except NotImplementedError:
# Differentiating the coefficients might fail because of things
# like f(2*x).diff(x). See issue 4624 and issue 4719.
pass
# Any first order ODE can be ideally solved by the Lie Group
# method
matching_hints["lie_group"] = r3
# This match is used for several cases below; we now collect on
# f(x) so the matching works.
r = collect(reduced_eq, df, exact=True).match(d + e * df)
if r is None and "factorable" not in matching_hints:
roots = solve(reduced_eq, df)
if roots:
meq = Mul(*[(df - i) for i in roots]) * Dummy()
m = _ode_factorable_match(meq, func, kwargs.get("x0", 0))
matching_hints["factorable"] = m
if r:
# Using r[d] and r[e] without any modification for hints
# linear-coefficients and separable-reduced.
num, den = r[d], r[e] # ODE = d/e + df
r["d"] = d
r["e"] = e
r["y"] = y
r[d] = num.subs(f(x), y)
r[e] = den.subs(f(x), y)
## Separable Case: y' == P(y)*Q(x)
r[d] = separatevars(r[d])
r[e] = separatevars(r[e])
# m1[coeff]*m1[x]*m1[y] + m2[coeff]*m2[x]*m2[y]*y'
m1 = separatevars(r[d], dict=True, symbols=(x, y))
m2 = separatevars(r[e], dict=True, symbols=(x, y))
if m1 and m2:
r1 = {"m1": m1, "m2": m2, "y": y}
matching_hints["separable"] = r1
matching_hints["separable_Integral"] = r1
## First order equation with homogeneous coefficients:
# dy/dx == F(y/x) or dy/dx == F(x/y)
ordera = homogeneous_order(r[d], x, y)
if ordera is not None:
orderb = homogeneous_order(r[e], x, y)
if ordera == orderb:
# u1=y/x and u2=x/y
u1 = Dummy("u1")
u2 = Dummy("u2")
s = "1st_homogeneous_coeff_subs"
s1 = s + "_dep_div_indep"
s2 = s + "_indep_div_dep"
if simplify((r[d] + u1 * r[e]).subs({x: 1, y: u1})) != 0:
matching_hints[s1] = r
matching_hints[s1 + "_Integral"] = r
if simplify((r[e] + u2 * r[d]).subs({x: u2, y: 1})) != 0:
matching_hints[s2] = r
matching_hints[s2 + "_Integral"] = r
if s1 in matching_hints and s2 in matching_hints:
matching_hints["1st_homogeneous_coeff_best"] = r
## Linear coefficients of the form
# y'+ F((a*x + b*y + c)/(a'*x + b'y + c')) = 0
# that can be reduced to homogeneous form.
F = num / den
params = _linear_coeff_match(F, func)
if params:
xarg, yarg = params
u = Dummy("u")
t = Dummy("t")
# Dummy substitution for df and f(x).
dummy_eq = reduced_eq.subs(((df, t), (f(x), u)))
reps = ((x, x + xarg), (u, u + yarg), (t, df), (u, f(x)))
dummy_eq = simplify(dummy_eq.subs(reps))
# get the re-cast values for e and d
r2 = collect(expand(dummy_eq), [df, f(x)]).match(e * df + d)
if r2:
orderd = homogeneous_order(r2[d], x, f(x))
if orderd is not None:
ordere = homogeneous_order(r2[e], x, f(x))
if orderd == ordere:
# Match arguments are passed in such a way that it
# is coherent with the already existing homogeneous
# functions.
r2[d] = r2[d].subs(f(x), y)
r2[e] = r2[e].subs(f(x), y)
r2.update(
{"xarg": xarg, "yarg": yarg, "d": d, "e": e, "y": y}
)
matching_hints["linear_coefficients"] = r2
matching_hints["linear_coefficients_Integral"] = r2
## Equation of the form y' + (y/x)*H(x^n*y) = 0
# that can be reduced to separable form
factor = simplify(x / f(x) * num / den)
# Try representing factor in terms of x^n*y
# where n is lowest power of x in factor;
# first remove terms like sqrt(2)*3 from factor.atoms(Mul)
u = None
for mul in ordered(factor.atoms(Mul)):
if mul.has(x):
_, u = mul.as_independent(x, f(x))
break
if u and u.has(f(x)):
h = x ** (degree(Poly(u.subs(f(x), y), gen=x))) * f(x)
p = Wild("p")
if (u / h == 1) or ((u / h).simplify().match(x**p)):
t = Dummy("t")
r2 = {"t": t}
xpart, ypart = u.as_independent(f(x))
test = factor.subs(((u, t), (1 / u, 1 / t)))
free = test.free_symbols
if len(free) == 1 and free.pop() == t:
r2.update({"power": xpart.as_base_exp()[1], "u": test})
matching_hints["separable_reduced"] = r2
matching_hints["separable_reduced_Integral"] = r2
## Almost-linear equation of the form f(x)*g(y)*y' + k(x)*l(y) + m(x) = 0
r = collect(eq, [df, f(x)]).match(e * df + d)
if r:
r2 = r.copy()
r2[c] = S.Zero
if r2[d].is_Add:
# Separate the terms having f(x) to r[d] and
# remaining to r[c]
no_f, r2[d] = r2[d].as_independent(f(x))
r2[c] += no_f
factor = simplify(r2[d].diff(f(x)) / r[e])
if factor and not factor.has(f(x)):
r2[d] = factor_terms(r2[d])
u = r2[d].as_independent(f(x), as_Add=False)[1]
r2.update({"a": e, "b": d, "c": c, "u": u})
r2[d] /= u
r2[e] /= u.diff(f(x))
matching_hints["almost_linear"] = r2
matching_hints["almost_linear_Integral"] = r2
elif order == 2:
# Liouville ODE in the form
# f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
# See Goldstein and Braun, "Advanced Methods for the Solution of
# Differential Equations", pg. 98
s = d * f(x).diff(x, 2) + e * df**2 + k * df
r = reduced_eq.match(s)
if r and r[d] != 0:
y = Dummy("y")
g = simplify(r[e] / r[d]).subs(f(x), y)
h = simplify(r[k] / r[d]).subs(f(x), y)
if y in h.free_symbols or x in g.free_symbols:
pass
else:
r = {"g": g, "h": h, "y": y}
matching_hints["Liouville"] = r
matching_hints["Liouville_Integral"] = r
# Homogeneous second order differential equation of the form
# a3*f(x).diff(x, 2) + b3*f(x).diff(x) + c3
# It has a definite power series solution at point x0 if, b3/a3 and c3/a3
# are analytic at x0.
deq = a3 * (f(x).diff(x, 2)) + b3 * df + c3 * f(x)
r = collect(reduced_eq, [f(x).diff(x, 2), f(x).diff(x), f(x)]).match(deq)
ordinary = False
if r:
if not all([r[key].is_polynomial() for key in r]):
n, d = reduced_eq.as_numer_denom()
reduced_eq = expand(n)
r = collect(reduced_eq, [f(x).diff(x, 2), f(x).diff(x), f(x)]).match(
deq
)
if r and r[a3] != 0:
p = cancel(r[b3] / r[a3]) # Used below
q = cancel(r[c3] / r[a3]) # Used below
point = kwargs.get("x0", 0)
check = p.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
check = q.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
ordinary = True
r.update(
{"a3": a3, "b3": b3, "c3": c3, "x0": point, "terms": terms}
)
matching_hints["2nd_power_series_ordinary"] = r
# Checking if the differential equation has a regular singular point
# at x0. It has a regular singular point at x0, if (b3/a3)*(x - x0)
# and (c3/a3)*((x - x0)**2) are analytic at x0.
if not ordinary:
p = cancel((x - point) * p)
check = p.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
q = cancel(((x - point) ** 2) * q)
check = q.subs(x, point)
if not check.has(oo, NaN, zoo, -oo):
coeff_dict = {"p": p, "q": q, "x0": point, "terms": terms}
matching_hints["2nd_power_series_regular"] = coeff_dict
# For Hypergeometric solutions.
_r = {}
_r.update(r)
rn = match_2nd_hypergeometric(_r, func)
if rn:
matching_hints["2nd_hypergeometric"] = rn
matching_hints["2nd_hypergeometric_Integral"] = rn
# If the ODE has regular singular point at x0 and is of the form
# Eq((x)**2*Derivative(y(x), x, x) + x*Derivative(y(x), x) +
# (a4**2*x**(2*p)-n**2)*y(x) thus Bessel's equation
rn = match_2nd_linear_bessel(r, f(x))
if rn:
matching_hints["2nd_linear_bessel"] = rn
# If the ODE is ordinary and is of the form of Airy's Equation
# Eq(x**2*Derivative(y(x),x,x)-(ax+b)*y(x))
if p.is_zero:
a4 = Wild("a4", exclude=[x, f(x), df])
b4 = Wild("b4", exclude=[x, f(x), df])
rn = q.match(a4 + b4 * x)
if rn and rn[b4] != 0:
rn = {"b": rn[a4], "m": rn[b4]}
matching_hints["2nd_linear_airy"] = rn
if order > 0:
# Any ODE that can be solved with a substitution and
# repeated integration e.g.:
# `d^2/dx^2(y) + x*d/dx(y) = constant
# f'(x) must be finite for this to work
r = _nth_order_reducible_match(reduced_eq, func)
if r:
matching_hints["nth_order_reducible"] = r
# nth order linear ODE
# a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y = F(x) = b
r = _nth_linear_match(reduced_eq, func, order)
# Constant coefficient case (a_i is constant for all i)
if r and not any(r[i].has(x) for i in r if i >= 0):
# Inhomogeneous case: F(x) is not identically 0
if r[-1]:
undetcoeff = _undetermined_coefficients_match(r[-1], x)
s = "nth_linear_constant_coeff_variation_of_parameters"
matching_hints[s] = r
matching_hints[s + "_Integral"] = r
if undetcoeff["test"]:
r["trialset"] = undetcoeff["trialset"]
matching_hints[
"nth_linear_constant_coeff_undetermined_coefficients"
] = r
# Homogeneous case: F(x) is identically 0
else:
matching_hints["nth_linear_constant_coeff_homogeneous"] = r
# nth order Euler equation a_n*x**n*y^(n) + ... + a_1*x*y' + a_0*y = F(x)
# In case of Homogeneous euler equation F(x) = 0
def _test_term(coeff, order):
r"""
Linear Euler ODEs have the form K*x**order*diff(y(x),x,order) = F(x),
where K is independent of x and y(x), order>= 0.
So we need to check that for each term, coeff == K*x**order from
some K. We have a few cases, since coeff may have several
different types.
"""
if order < 0:
raise ValueError("order should be greater than 0")
if coeff == 0:
return True
if order == 0:
if x in coeff.free_symbols:
return False
return True
if coeff.is_Mul:
if coeff.has(f(x)):
return False
return x**order in coeff.args
elif coeff.is_Pow:
return coeff.as_base_exp() == (x, order)
elif order == 1:
return x == coeff
return False
# Find coefficient for highest derivative, multiply coefficients to
# bring the equation into Euler form if possible
r_rescaled = None
if r is not None:
coeff = r[order]
factor = x**order / coeff
r_rescaled = {i: factor * r[i] for i in r if i != "trialset"}
# XXX: Mixing up the trialset with the coefficients is error-prone.
# These should be separated as something like r['coeffs'] and
# r['trialset']
if r_rescaled and not any(
not _test_term(r_rescaled[i], i)
for i in r_rescaled
if i != "trialset" and i >= 0
):
if not r_rescaled[-1]:
matching_hints["nth_linear_euler_eq_homogeneous"] = r_rescaled
else:
matching_hints[
"nth_linear_euler_eq_nonhomogeneous_variation_of_parameters"
] = r_rescaled
matching_hints[
"nth_linear_euler_eq_nonhomogeneous_variation_of_parameters_Integral"
] = r_rescaled
e, re = posify(r_rescaled[-1].subs(x, exp(x)))
undetcoeff = _undetermined_coefficients_match(e.subs(re), x)
if undetcoeff["test"]:
r_rescaled["trialset"] = undetcoeff["trialset"]
matching_hints[
"nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients"
] = r_rescaled
# Order keys based on allhints.
retlist = [i for i in allhints if i in matching_hints]
if dict:
# Dictionaries are ordered arbitrarily, so make note of which
# hint would come first for dsolve(). Use an ordered dict in Py 3.
matching_hints["default"] = retlist[0] if retlist else None
matching_hints["ordered_hints"] = tuple(retlist)
return matching_hints
else:
return tuple(retlist)
|
https://github.com/sympy/sympy/issues/5096
|
dsolve(f(x).diff(x, x) + f(x) - x*sin(x - 2), f(x), hint='nth_linear_constant_coeff_undetermined_coefficients')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "sympy/solvers/ode.py", line 457, in dsolve
match=hints[hint]), func, hints['order'], hint)
File "sympy/solvers/ode.py", line 2370, in ode_nth_linear_constant_coeff_undetermined_coefficients
return _solve_undetermined_coefficients(eq, func, order, match)
File "sympy/solvers/ode.py", line 2460, in _solve_undetermined_coefficients
" method of undetermined coefficients (unable to solve for coefficients).")
NotImplementedError: Could not solve -x*sin(-2 + x) + D(f(x), x, x) + f(x) using the method of undetermined coefficients (unable to solve for coefficients).
|
NotImplementedError
|
def ode_nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients(
eq, func, order, match, returns="sol"
):
r"""
Solves an `n`\th order linear non homogeneous Cauchy-Euler equidimensional
ordinary differential equation using undetermined coefficients.
This is an equation with form `g(x) = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x)
\cdots`.
These equations can be solved in a general manner, by substituting
solutions of the form `x = exp(t)`, and deriving a characteristic equation
of form `g(exp(t)) = b_0 f(t) + b_1 f'(t) + b_2 f''(t) \cdots` which can
be then solved by nth_linear_constant_coeff_undetermined_coefficients if
g(exp(t)) has finite number of linearly independent derivatives.
Functions that fit this requirement are finite sums functions of the form
`a x^i e^{b x} \sin(c x + d)` or `a x^i e^{b x} \cos(c x + d)`, where `i`
is a non-negative integer and `a`, `b`, `c`, and `d` are constants. For
example any polynomial in `x`, functions like `x^2 e^{2 x}`, `x \sin(x)`,
and `e^x \cos(x)` can all be used. Products of `\sin`'s and `\cos`'s have
a finite number of derivatives, because they can be expanded into `\sin(a
x)` and `\cos(b x)` terms. However, SymPy currently cannot do that
expansion, so you will need to manually rewrite the expression in terms of
the above to use this method. So, for example, you will need to manually
convert `\sin^2(x)` into `(1 + \cos(2 x))/2` to properly apply the method
of undetermined coefficients on it.
After replacement of x by exp(t), this method works by creating a trial function
from the expression and all of its linear independent derivatives and
substituting them into the original ODE. The coefficients for each term
will be a system of linear equations, which are be solved for and
substituted, giving the solution. If any of the trial functions are linearly
dependent on the solution to the homogeneous equation, they are multiplied
by sufficient `x` to make them linearly independent.
Examples
========
>>> from sympy import dsolve, Function, Derivative, log
>>> from sympy.abc import x
>>> f = Function('f')
>>> eq = x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x) - log(x)
>>> dsolve(eq, f(x),
... hint='nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients').expand()
Eq(f(x), C1*x + C2*x**2 + log(x)/2 + 3/4)
"""
x = func.args[0]
f = func.func
r = match
chareq, eq, symbol = S.Zero, S.Zero, Dummy("x")
for i in r.keys():
if not isinstance(i, str) and i >= 0:
chareq += (r[i] * diff(x**symbol, x, i) * x**-symbol).expand()
for i in range(1, degree(Poly(chareq, symbol)) + 1):
eq += chareq.coeff(symbol**i) * diff(f(x), x, i)
if chareq.as_coeff_add(symbol)[0]:
eq += chareq.as_coeff_add(symbol)[0] * f(x)
e, re = posify(r[-1].subs(x, exp(x)))
eq += e.subs(re)
match = _nth_linear_match(eq, f(x), ode_order(eq, f(x)))
eq_homogeneous = Add(eq, -match[-1])
match["trialset"] = _undetermined_coefficients_match(
match[-1], x, func, eq_homogeneous
)["trialset"]
return (
ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match)
.subs(x, log(x))
.subs(f(log(x)), f(x))
.expand()
)
|
def ode_nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients(
eq, func, order, match, returns="sol"
):
r"""
Solves an `n`\th order linear non homogeneous Cauchy-Euler equidimensional
ordinary differential equation using undetermined coefficients.
This is an equation with form `g(x) = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x)
\cdots`.
These equations can be solved in a general manner, by substituting
solutions of the form `x = exp(t)`, and deriving a characteristic equation
of form `g(exp(t)) = b_0 f(t) + b_1 f'(t) + b_2 f''(t) \cdots` which can
be then solved by nth_linear_constant_coeff_undetermined_coefficients if
g(exp(t)) has finite number of linearly independent derivatives.
Functions that fit this requirement are finite sums functions of the form
`a x^i e^{b x} \sin(c x + d)` or `a x^i e^{b x} \cos(c x + d)`, where `i`
is a non-negative integer and `a`, `b`, `c`, and `d` are constants. For
example any polynomial in `x`, functions like `x^2 e^{2 x}`, `x \sin(x)`,
and `e^x \cos(x)` can all be used. Products of `\sin`'s and `\cos`'s have
a finite number of derivatives, because they can be expanded into `\sin(a
x)` and `\cos(b x)` terms. However, SymPy currently cannot do that
expansion, so you will need to manually rewrite the expression in terms of
the above to use this method. So, for example, you will need to manually
convert `\sin^2(x)` into `(1 + \cos(2 x))/2` to properly apply the method
of undetermined coefficients on it.
After replacement of x by exp(t), this method works by creating a trial function
from the expression and all of its linear independent derivatives and
substituting them into the original ODE. The coefficients for each term
will be a system of linear equations, which are be solved for and
substituted, giving the solution. If any of the trial functions are linearly
dependent on the solution to the homogeneous equation, they are multiplied
by sufficient `x` to make them linearly independent.
Examples
========
>>> from sympy import dsolve, Function, Derivative, log
>>> from sympy.abc import x
>>> f = Function('f')
>>> eq = x**2*Derivative(f(x), x, x) - 2*x*Derivative(f(x), x) + 2*f(x) - log(x)
>>> dsolve(eq, f(x),
... hint='nth_linear_euler_eq_nonhomogeneous_undetermined_coefficients').expand()
Eq(f(x), C1*x + C2*x**2 + log(x)/2 + 3/4)
"""
x = func.args[0]
f = func.func
r = match
chareq, eq, symbol = S.Zero, S.Zero, Dummy("x")
for i in r.keys():
if not isinstance(i, str) and i >= 0:
chareq += (r[i] * diff(x**symbol, x, i) * x**-symbol).expand()
for i in range(1, degree(Poly(chareq, symbol)) + 1):
eq += chareq.coeff(symbol**i) * diff(f(x), x, i)
if chareq.as_coeff_add(symbol)[0]:
eq += chareq.as_coeff_add(symbol)[0] * f(x)
e, re = posify(r[-1].subs(x, exp(x)))
eq += e.subs(re)
match = _nth_linear_match(eq, f(x), ode_order(eq, f(x)))
match["trialset"] = r["trialset"]
return (
ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match)
.subs(x, log(x))
.subs(f(log(x)), f(x))
.expand()
)
|
https://github.com/sympy/sympy/issues/5096
|
dsolve(f(x).diff(x, x) + f(x) - x*sin(x - 2), f(x), hint='nth_linear_constant_coeff_undetermined_coefficients')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "sympy/solvers/ode.py", line 457, in dsolve
match=hints[hint]), func, hints['order'], hint)
File "sympy/solvers/ode.py", line 2370, in ode_nth_linear_constant_coeff_undetermined_coefficients
return _solve_undetermined_coefficients(eq, func, order, match)
File "sympy/solvers/ode.py", line 2460, in _solve_undetermined_coefficients
" method of undetermined coefficients (unable to solve for coefficients).")
NotImplementedError: Could not solve -x*sin(-2 + x) + D(f(x), x, x) + f(x) using the method of undetermined coefficients (unable to solve for coefficients).
|
NotImplementedError
|
def _solve_undetermined_coefficients(eq, func, order, match):
r"""
Helper function for the method of undetermined coefficients.
See the
:py:meth:`~sympy.solvers.ode.ode_nth_linear_constant_coeff_undetermined_coefficients`
docstring for more information on this method.
The parameter ``match`` should be a dictionary that has the following
keys:
``list``
A list of solutions to the homogeneous equation, such as the list
returned by
``ode_nth_linear_constant_coeff_homogeneous(returns='list')``.
``sol``
The general solution, such as the solution returned by
``ode_nth_linear_constant_coeff_homogeneous(returns='sol')``.
``trialset``
The set of trial functions as returned by
``_undetermined_coefficients_match()['trialset']``.
"""
x = func.args[0]
f = func.func
r = match
coeffs = numbered_symbols("a", cls=Dummy)
coefflist = []
gensols = r["list"]
gsol = r["sol"]
trialset = r["trialset"]
if len(gensols) != order:
raise NotImplementedError(
"Cannot find "
+ str(order)
+ " solutions to the homogeneous equation necessary to apply"
+ " undetermined coefficients to "
+ str(eq)
+ " (number of terms != order)"
)
trialfunc = 0
for i in trialset:
c = next(coeffs)
coefflist.append(c)
trialfunc += c * i
eqs = sub_func_doit(eq, f(x), trialfunc)
coeffsdict = dict(list(zip(trialset, [0] * (len(trialset) + 1))))
eqs = _mexpand(eqs)
for i in Add.make_args(eqs):
s = separatevars(i, dict=True, symbols=[x])
if coeffsdict.get(s[x]):
coeffsdict[s[x]] += s["coeff"]
else:
coeffsdict[s[x]] = s["coeff"]
coeffvals = solve(list(coeffsdict.values()), coefflist)
if not coeffvals:
raise NotImplementedError(
"Could not solve `%s` using the "
"method of undetermined coefficients "
"(unable to solve for coefficients)." % eq
)
psol = trialfunc.subs(coeffvals)
return Eq(f(x), gsol.rhs + psol)
|
def _solve_undetermined_coefficients(eq, func, order, match):
r"""
Helper function for the method of undetermined coefficients.
See the
:py:meth:`~sympy.solvers.ode.ode_nth_linear_constant_coeff_undetermined_coefficients`
docstring for more information on this method.
The parameter ``match`` should be a dictionary that has the following
keys:
``list``
A list of solutions to the homogeneous equation, such as the list
returned by
``ode_nth_linear_constant_coeff_homogeneous(returns='list')``.
``sol``
The general solution, such as the solution returned by
``ode_nth_linear_constant_coeff_homogeneous(returns='sol')``.
``trialset``
The set of trial functions as returned by
``_undetermined_coefficients_match()['trialset']``.
"""
x = func.args[0]
f = func.func
r = match
coeffs = numbered_symbols("a", cls=Dummy)
coefflist = []
gensols = r["list"]
gsol = r["sol"]
trialset = r["trialset"]
notneedset = set([])
# XXX: This global collectterms hack should be removed.
global collectterms
if len(gensols) != order:
raise NotImplementedError(
"Cannot find "
+ str(order)
+ " solutions to the homogeneous equation necessary to apply"
+ " undetermined coefficients to "
+ str(eq)
+ " (number of terms != order)"
)
usedsin = set([])
mult = 0 # The multiplicity of the root
getmult = True
for i, reroot, imroot in collectterms:
if getmult:
mult = i + 1
getmult = False
if i == 0:
getmult = True
if imroot:
# Alternate between sin and cos
if (i, reroot) in usedsin:
check = x**i * exp(reroot * x) * cos(imroot * x)
else:
check = x**i * exp(reroot * x) * sin(abs(imroot) * x)
usedsin.add((i, reroot))
else:
check = x**i * exp(reroot * x)
if check in trialset:
# If an element of the trial function is already part of the
# homogeneous solution, we need to multiply by sufficient x to
# make it linearly independent. We also don't need to bother
# checking for the coefficients on those elements, since we
# already know it will be 0.
while True:
if check * x**mult in trialset:
mult += 1
else:
break
trialset.add(check * x**mult)
notneedset.add(check)
newtrialset = trialset - notneedset
trialfunc = 0
for i in newtrialset:
c = next(coeffs)
coefflist.append(c)
trialfunc += c * i
eqs = sub_func_doit(eq, f(x), trialfunc)
coeffsdict = dict(list(zip(trialset, [0] * (len(trialset) + 1))))
eqs = _mexpand(eqs)
for i in Add.make_args(eqs):
s = separatevars(i, dict=True, symbols=[x])
coeffsdict[s[x]] += s["coeff"]
coeffvals = solve(list(coeffsdict.values()), coefflist)
if not coeffvals:
raise NotImplementedError(
"Could not solve `%s` using the "
"method of undetermined coefficients "
"(unable to solve for coefficients)." % eq
)
psol = trialfunc.subs(coeffvals)
return Eq(f(x), gsol.rhs + psol)
|
https://github.com/sympy/sympy/issues/5096
|
dsolve(f(x).diff(x, x) + f(x) - x*sin(x - 2), f(x), hint='nth_linear_constant_coeff_undetermined_coefficients')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "sympy/solvers/ode.py", line 457, in dsolve
match=hints[hint]), func, hints['order'], hint)
File "sympy/solvers/ode.py", line 2370, in ode_nth_linear_constant_coeff_undetermined_coefficients
return _solve_undetermined_coefficients(eq, func, order, match)
File "sympy/solvers/ode.py", line 2460, in _solve_undetermined_coefficients
" method of undetermined coefficients (unable to solve for coefficients).")
NotImplementedError: Could not solve -x*sin(-2 + x) + D(f(x), x, x) + f(x) using the method of undetermined coefficients (unable to solve for coefficients).
|
NotImplementedError
|
def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero):
r"""
Returns a trial function match if undetermined coefficients can be applied
to ``expr``, and ``None`` otherwise.
A trial expression can be found for an expression for use with the method
of undetermined coefficients if the expression is an
additive/multiplicative combination of constants, polynomials in `x` (the
independent variable of expr), `\sin(a x + b)`, `\cos(a x + b)`, and
`e^{a x}` terms (in other words, it has a finite number of linearly
independent derivatives).
Note that you may still need to multiply each term returned here by
sufficient `x` to make it linearly independent with the solutions to the
homogeneous equation.
This is intended for internal use by ``undetermined_coefficients`` hints.
SymPy currently has no way to convert `\sin^n(x) \cos^m(y)` into a sum of
only `\sin(a x)` and `\cos(b x)` terms, so these are not implemented. So,
for example, you will need to manually convert `\sin^2(x)` into `[1 +
\cos(2 x)]/2` to properly apply the method of undetermined coefficients on
it.
Examples
========
>>> from sympy import log, exp
>>> from sympy.solvers.ode import _undetermined_coefficients_match
>>> from sympy.abc import x
>>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x)
{'test': True, 'trialset': {x*exp(x), exp(-x), exp(x)}}
>>> _undetermined_coefficients_match(log(x), x)
{'test': False}
"""
a = Wild("a", exclude=[x])
b = Wild("b", exclude=[x])
expr = powsimp(expr, combine="exp") # exp(x)*exp(2*x + 1) => exp(3*x + 1)
retdict = {}
def _test_term(expr, x):
r"""
Test if ``expr`` fits the proper form for undetermined coefficients.
"""
if not expr.has(x):
return True
elif expr.is_Add:
return all(_test_term(i, x) for i in expr.args)
elif expr.is_Mul:
if expr.has(sin, cos):
foundtrig = False
# Make sure that there is only one trig function in the args.
# See the docstring.
for i in expr.args:
if i.has(sin, cos):
if foundtrig:
return False
else:
foundtrig = True
return all(_test_term(i, x) for i in expr.args)
elif expr.is_Function:
if expr.func in (sin, cos, exp):
if expr.args[0].match(a * x + b):
return True
else:
return False
else:
return False
elif (
expr.is_Pow
and expr.base.is_Symbol
and expr.exp.is_Integer
and expr.exp >= 0
):
return True
elif expr.is_Pow and expr.base.is_number:
if expr.exp.match(a * x + b):
return True
else:
return False
elif expr.is_Symbol or expr.is_number:
return True
else:
return False
def _get_trial_set(expr, x, exprs=set([])):
r"""
Returns a set of trial terms for undetermined coefficients.
The idea behind undetermined coefficients is that the terms expression
repeat themselves after a finite number of derivatives, except for the
coefficients (they are linearly dependent). So if we collect these,
we should have the terms of our trial function.
"""
def _remove_coefficient(expr, x):
r"""
Returns the expression without a coefficient.
Similar to expr.as_independent(x)[1], except it only works
multiplicatively.
"""
term = S.One
if expr.is_Mul:
for i in expr.args:
if i.has(x):
term *= i
elif expr.has(x):
term = expr
return term
expr = expand_mul(expr)
if expr.is_Add:
for term in expr.args:
if _remove_coefficient(term, x) in exprs:
pass
else:
exprs.add(_remove_coefficient(term, x))
exprs = exprs.union(_get_trial_set(term, x, exprs))
else:
term = _remove_coefficient(expr, x)
tmpset = exprs.union({term})
oldset = set([])
while tmpset != oldset:
# If you get stuck in this loop, then _test_term is probably
# broken
oldset = tmpset.copy()
expr = expr.diff(x)
term = _remove_coefficient(expr, x)
if term.is_Add:
tmpset = tmpset.union(_get_trial_set(term, x, tmpset))
else:
tmpset.add(term)
exprs = tmpset
return exprs
def is_homogeneous_solution(term):
r"""This function checks whether the given trialset contains any root
of homogenous equation"""
return expand(sub_func_doit(eq_homogeneous, func, term)).is_zero
retdict["test"] = _test_term(expr, x)
if retdict["test"]:
# Try to generate a list of trial solutions that will have the
# undetermined coefficients. Note that if any of these are not linearly
# independent with any of the solutions to the homogeneous equation,
# then they will need to be multiplied by sufficient x to make them so.
# This function DOES NOT do that (it doesn't even look at the
# homogeneous equation).
temp_set = set([])
for i in Add.make_args(expr):
act = _get_trial_set(i, x)
if eq_homogeneous is not S.Zero:
while any(is_homogeneous_solution(ts) for ts in act):
act = {x * ts for ts in act}
temp_set = temp_set.union(act)
retdict["trialset"] = temp_set
return retdict
|
def _undetermined_coefficients_match(expr, x):
r"""
Returns a trial function match if undetermined coefficients can be applied
to ``expr``, and ``None`` otherwise.
A trial expression can be found for an expression for use with the method
of undetermined coefficients if the expression is an
additive/multiplicative combination of constants, polynomials in `x` (the
independent variable of expr), `\sin(a x + b)`, `\cos(a x + b)`, and
`e^{a x}` terms (in other words, it has a finite number of linearly
independent derivatives).
Note that you may still need to multiply each term returned here by
sufficient `x` to make it linearly independent with the solutions to the
homogeneous equation.
This is intended for internal use by ``undetermined_coefficients`` hints.
SymPy currently has no way to convert `\sin^n(x) \cos^m(y)` into a sum of
only `\sin(a x)` and `\cos(b x)` terms, so these are not implemented. So,
for example, you will need to manually convert `\sin^2(x)` into `[1 +
\cos(2 x)]/2` to properly apply the method of undetermined coefficients on
it.
Examples
========
>>> from sympy import log, exp
>>> from sympy.solvers.ode import _undetermined_coefficients_match
>>> from sympy.abc import x
>>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x)
{'test': True, 'trialset': {x*exp(x), exp(-x), exp(x)}}
>>> _undetermined_coefficients_match(log(x), x)
{'test': False}
"""
a = Wild("a", exclude=[x])
b = Wild("b", exclude=[x])
expr = powsimp(expr, combine="exp") # exp(x)*exp(2*x + 1) => exp(3*x + 1)
retdict = {}
def _test_term(expr, x):
r"""
Test if ``expr`` fits the proper form for undetermined coefficients.
"""
if not expr.has(x):
return True
elif expr.is_Add:
return all(_test_term(i, x) for i in expr.args)
elif expr.is_Mul:
if expr.has(sin, cos):
foundtrig = False
# Make sure that there is only one trig function in the args.
# See the docstring.
for i in expr.args:
if i.has(sin, cos):
if foundtrig:
return False
else:
foundtrig = True
return all(_test_term(i, x) for i in expr.args)
elif expr.is_Function:
if expr.func in (sin, cos, exp):
if expr.args[0].match(a * x + b):
return True
else:
return False
else:
return False
elif (
expr.is_Pow
and expr.base.is_Symbol
and expr.exp.is_Integer
and expr.exp >= 0
):
return True
elif expr.is_Pow and expr.base.is_number:
if expr.exp.match(a * x + b):
return True
else:
return False
elif expr.is_Symbol or expr.is_number:
return True
else:
return False
def _get_trial_set(expr, x, exprs=set([])):
r"""
Returns a set of trial terms for undetermined coefficients.
The idea behind undetermined coefficients is that the terms expression
repeat themselves after a finite number of derivatives, except for the
coefficients (they are linearly dependent). So if we collect these,
we should have the terms of our trial function.
"""
def _remove_coefficient(expr, x):
r"""
Returns the expression without a coefficient.
Similar to expr.as_independent(x)[1], except it only works
multiplicatively.
"""
term = S.One
if expr.is_Mul:
for i in expr.args:
if i.has(x):
term *= i
elif expr.has(x):
term = expr
return term
expr = expand_mul(expr)
if expr.is_Add:
for term in expr.args:
if _remove_coefficient(term, x) in exprs:
pass
else:
exprs.add(_remove_coefficient(term, x))
exprs = exprs.union(_get_trial_set(term, x, exprs))
else:
term = _remove_coefficient(expr, x)
tmpset = exprs.union({term})
oldset = set([])
while tmpset != oldset:
# If you get stuck in this loop, then _test_term is probably
# broken
oldset = tmpset.copy()
expr = expr.diff(x)
term = _remove_coefficient(expr, x)
if term.is_Add:
tmpset = tmpset.union(_get_trial_set(term, x, tmpset))
else:
tmpset.add(term)
exprs = tmpset
return exprs
retdict["test"] = _test_term(expr, x)
if retdict["test"]:
# Try to generate a list of trial solutions that will have the
# undetermined coefficients. Note that if any of these are not linearly
# independent with any of the solutions to the homogeneous equation,
# then they will need to be multiplied by sufficient x to make them so.
# This function DOES NOT do that (it doesn't even look at the
# homogeneous equation).
retdict["trialset"] = _get_trial_set(expr, x)
return retdict
|
https://github.com/sympy/sympy/issues/5096
|
dsolve(f(x).diff(x, x) + f(x) - x*sin(x - 2), f(x), hint='nth_linear_constant_coeff_undetermined_coefficients')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "sympy/solvers/ode.py", line 457, in dsolve
match=hints[hint]), func, hints['order'], hint)
File "sympy/solvers/ode.py", line 2370, in ode_nth_linear_constant_coeff_undetermined_coefficients
return _solve_undetermined_coefficients(eq, func, order, match)
File "sympy/solvers/ode.py", line 2460, in _solve_undetermined_coefficients
" method of undetermined coefficients (unable to solve for coefficients).")
NotImplementedError: Could not solve -x*sin(-2 + x) + D(f(x), x, x) + f(x) using the method of undetermined coefficients (unable to solve for coefficients).
|
NotImplementedError
|
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> from sympy import minimal_polynomial, sqrt, Rational
>>> from sympy.abc import x, y
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), x, compose=True)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, x, compose=True)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.q * x - ex.p
if ex is I:
_, factors = factor_list(x**2 + 1, x, domain=dom)
return x**2 + 1 if len(factors) == 1 else x - I
if hasattr(dom, "symbols") and ex in dom.symbols:
return x - ex
if dom.is_QQ and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *ex.args)
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = dict(r[True])
dens = [y.q for y in r1.values()]
lcmdens = reduce(lcm, dens, 1)
neg1 = S.NegativeOne
expn1 = r1.pop(neg1, S.Zero)
nums = [base ** (y.p * lcmdens // y.q) for base, y in r1.items()]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1, x)
# use the fact that in SymPy canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
# Powers of -1 have to be treated separately to preserve sign.
mp2 = ex2.q * x**lcmdens - ex2.p * neg1 ** (expn1 * lcmdens)
ex2 = neg1**expn1 * ex2 ** Rational(1, lcmdens)
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *ex.args)
elif ex.is_Pow:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif ex.__class__ is sin:
res = _minpoly_sin(ex, x)
elif ex.__class__ is cos:
res = _minpoly_cos(ex, x)
elif ex.__class__ is exp:
res = _minpoly_exp(ex, x)
elif ex.__class__ is CRootOf:
res = _minpoly_rootof(ex, x)
else:
raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
return res
|
def _minpoly_compose(ex, x, dom):
"""
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> from sympy import minimal_polynomial, sqrt, Rational
>>> from sympy.abc import x, y
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), x, compose=True)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, x, compose=True)
x**2*y**2 - 2*x*y - y**3 + 1
"""
if ex.is_Rational:
return ex.q * x - ex.p
if ex is I:
_, factors = factor_list(x**2 + 1, x, domain=dom)
return x**2 + 1 if len(factors) == 1 else x - I
if hasattr(dom, "symbols") and ex in dom.symbols:
return x - ex
if dom.is_QQ and _is_sum_surds(ex):
# eliminate the square roots
ex -= x
while 1:
ex1 = _separate_sq(ex)
if ex1 is ex:
return ex
else:
ex = ex1
if ex.is_Add:
res = _minpoly_add(x, dom, *ex.args)
elif ex.is_Mul:
f = Factors(ex).factors
r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
if r[True] and dom == QQ:
ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
r1 = r[True]
dens = [y.q for _, y in r1]
lcmdens = reduce(lcm, dens, 1)
nums = [base ** (y.p * lcmdens // y.q) for base, y in r1]
ex2 = Mul(*nums)
mp1 = minimal_polynomial(ex1, x)
# use the fact that in SymPy canonicalization products of integers
# raised to rational powers are organized in relatively prime
# bases, and that in ``base**(n/d)`` a perfect power is
# simplified with the root
mp2 = ex2.q * x**lcmdens - ex2.p
ex2 = ex2 ** Rational(1, lcmdens)
res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
else:
res = _minpoly_mul(x, dom, *ex.args)
elif ex.is_Pow:
res = _minpoly_pow(ex.base, ex.exp, x, dom)
elif ex.__class__ is sin:
res = _minpoly_sin(ex, x)
elif ex.__class__ is cos:
res = _minpoly_cos(ex, x)
elif ex.__class__ is exp:
res = _minpoly_exp(ex, x)
elif ex.__class__ is CRootOf:
res = _minpoly_rootof(ex, x)
else:
raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
return res
|
https://github.com/sympy/sympy/issues/5934
|
I found this from the expression from issue 5933 :
In [50]: [minpoly(i) for i in x.as_numer_denom()]
---------------------------------------------------------------------------
PolynomialError Traceback (most recent call last)
/Users/aaronmeurer/Documents/python/sympy/sympy/<ipython-input-50-ffbba6cffd4e> in <module>()
----> 1 [minpoly(i) for i in x.as_numer_denom()]
/Users/aaronmeurer/Documents/python/sympy/sympy/sympy/polys/numberfields.pyc in minimal_polynomial(ex, x, **args)
130 else:
131 F = [x - bottom_up_scan(ex)] + mapping.values()
--> 132 G = groebner(F, symbols.values() + [x], order='lex')
133
134 _, factors = factor_list(G[-1])
/Users/aaronmeurer/Documents/python/sympy/sympy/sympy/polys/polytools.pyc in groebner(F, *gens, **args)
5458
5459 """
-> 5460 return GroebnerBasis(F, *gens, **args)
5461
5462 def is_zero_dimensional(F, *gens, **args):
/Users/aaronmeurer/Documents/python/sympy/sympy/sympy/polys/polytools.pyc in __new__(cls, F, *gens, **args)
5485
5486 try:
-> 5487 polys, opt = parallel_poly_from_expr(F, *gens, **args)
5488 except PolificationFailed, exc:
5489 raise ComputationFailed('groebner', len(F), exc)
/Users/aaronmeurer/Documents/python/sympy/sympy/sympy/polys/polytools.pyc in parallel_poly_from_expr(exprs, *gens, **args)
3670 """Construct polynomials from expressions. """
3671 opt = options.build_options(gens, args)
-> 3672 return _parallel_poly_from_expr(exprs, opt)
3673
3674 def _parallel_poly_from_expr(exprs, opt):
/Users/aaronmeurer/Documents/python/sympy/sympy/sympy/polys/polytools.pyc in _parallel_poly_from_expr(exprs, opt)
3721
3722 try:
-> 3723 reps, opt = _parallel_dict_from_expr(exprs, opt)
3724 except GeneratorsNeeded:
3725 raise PolificationFailed(opt, origs, exprs, True)
/Users/aaronmeurer/Documents/python/sympy/sympy/sympy/polys/polyutils.pyc in _parallel_dict_from_expr(exprs, opt)
276
277 if opt.gens:
--> 278 reps, gens = _parallel_dict_from_expr_if_gens(exprs, opt)
279 else:
280 reps, gens = _parallel_dict_from_expr_no_gens(exprs, opt)
/Users/aaronmeurer/Documents/python/sympy/sympy/sympy/polys/polyutils.pyc in _parallel_dict_from_expr_if_gens(exprs, opt)
161 coeff.append(factor)
162 else:
--> 163 raise PolynomialError("%s contains an element of the generators set" % factor)
164
165 monom = tuple(monom)
PolynomialError: sqrt(_a4) contains an element of the generators set
In [83]: print x
(-240*sqrt(2)*sqrt(sqrt(5) + 5)*sqrt(8*sqrt(5) + 40) - 360*sqrt(2)*sqrt(-8*sqrt(5) + 40)*sqrt(-sqrt(5) + 5) - 120*sqrt(10)*sqrt(-8*sqrt(5) + 40)*sqrt(-sqrt(5) + 5) + 120*sqrt(2)*sqrt(-8*sqrt(5) + 40)*sqrt(sqrt(5) + 5) + 120*sqrt(2)*sqrt(-sqrt(5) + 5)*sqrt(8*sqrt(5) + 40) + 120*sqrt(10)*sqrt(-8*sqrt(5) + 40)*sqrt(sqrt(5) + 5) + 120*sqrt(10)*sqrt(-sqrt(5) + 5)*sqrt(8*sqrt(5) + 40))/(-36000 - 7200*sqrt(5) + (12*sqrt(10)*sqrt(sqrt(5) + 5) + 24*sqrt(10)*sqrt(-sqrt(5) + 5))**2)
The error also occurs if you just call minpoly(x).
|
PolynomialError
|
def _eval_evalf(self, prec):
# Lookup mpmath function based on name
try:
if isinstance(self.func, UndefinedFunction):
# Shouldn't lookup in mpmath but might have ._imp_
raise AttributeError
fname = self.func.__name__
if not hasattr(mpmath, fname):
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
fname = MPMATH_TRANSLATIONS[fname]
func = getattr(mpmath, fname)
except (AttributeError, KeyError):
try:
return Float(self._imp_(*[i.evalf(prec) for i in self.args]), prec)
except (AttributeError, TypeError, ValueError):
return
# Convert all args to mpf or mpc
# Convert the arguments to *higher* precision than requested for the
# final result.
# XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
# we be more intelligent about it?
try:
args = [arg._to_mpmath(prec + 5) for arg in self.args]
def bad(m):
from mpmath import mpf, mpc
# the precision of an mpf value is the last element
# if that is 1 (and m[1] is not 1 which would indicate a
# power of 2), then the eval failed; so check that none of
# the arguments failed to compute to a finite precision.
# Note: An mpc value has two parts, the re and imag tuple;
# check each of those parts, too. Anything else is allowed to
# pass
if isinstance(m, mpf):
m = m._mpf_
return m[1] != 1 and m[-1] == 1
elif isinstance(m, mpc):
m, n = m._mpc_
return m[1] != 1 and m[-1] == 1 and n[1] != 1 and n[-1] == 1
else:
return False
if any(bad(a) for a in args):
raise ValueError # one or more args failed to compute with significance
except ValueError:
return
with mpmath.workprec(prec):
v = func(*args)
return Expr._from_mpmath(v, prec)
|
def _eval_evalf(self, prec):
# Lookup mpmath function based on name
fname = self.func.__name__
try:
if not hasattr(mpmath, fname):
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
fname = MPMATH_TRANSLATIONS[fname]
func = getattr(mpmath, fname)
except (AttributeError, KeyError):
try:
return Float(self._imp_(*[i.evalf(prec) for i in self.args]), prec)
except (AttributeError, TypeError, ValueError):
return
# Convert all args to mpf or mpc
# Convert the arguments to *higher* precision than requested for the
# final result.
# XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
# we be more intelligent about it?
try:
args = [arg._to_mpmath(prec + 5) for arg in self.args]
def bad(m):
from mpmath import mpf, mpc
# the precision of an mpf value is the last element
# if that is 1 (and m[1] is not 1 which would indicate a
# power of 2), then the eval failed; so check that none of
# the arguments failed to compute to a finite precision.
# Note: An mpc value has two parts, the re and imag tuple;
# check each of those parts, too. Anything else is allowed to
# pass
if isinstance(m, mpf):
m = m._mpf_
return m[1] != 1 and m[-1] == 1
elif isinstance(m, mpc):
m, n = m._mpc_
return m[1] != 1 and m[-1] == 1 and n[1] != 1 and n[-1] == 1
else:
return False
if any(bad(a) for a in args):
raise ValueError # one or more args failed to compute with significance
except ValueError:
return
with mpmath.workprec(prec):
v = func(*args)
return Expr._from_mpmath(v, prec)
|
https://github.com/sympy/sympy/issues/6938
|
If I run sympy.S("ceil(2.3)").evalf() with sympy 0.7.1 then I get 3. However, if I run the same using sympy 0.7.2 then I get an AttributeError (see below). I discovered that switching to use ceiling() instead of ceil() works, but I have no idea why ceil() disappeared in the first place. Someone in IRC told me that the AttributeError was not the correct behaviour for 0.7.2 either.
In [32]: S("ceil(2.3)").evalf()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-32-82d9887af7f2> in <module>()
----> 1 S("ceil(2.3)").evalf()
/home/rob/tmp/sympy-install/lib/python2.7/site-packages/sympy/core/sympify.pyc in sympify(a, locals, convert_xor, strict, rational)
176 try:
177 a = a.replace('\n', '')
--> 178 expr = parse_expr(a, locals or {}, rational, convert_xor)
179 except (TokenError, SyntaxError):
180 raise SympifyError('could not parse %r' % a)
/home/rob/tmp/sympy-install/lib/python2.7/site-packages/sympy/parsing/sympy_parser.pyc in parse_expr(s, local_dict, rationalize, convert_xor)
160
161 code = _transform(s.strip(), local_dict, global_dict, rationalize, convert_xor)
--> 162 expr = eval(code, global_dict, local_dict) # take local objects in preference
163
164 if not hit:
<string> in <module>()
/home/rob/tmp/sympy-install/lib/python2.7/site-packages/sympy/core/symbol.pyc in __call__(self, *args)
111 def __call__(self, *args):
112 from function import Function
--> 113 return Function(self.name)(*args)
114
115 def as_real_imag(self, deep=True, **hints):
/home/rob/tmp/sympy-install/lib/python2.7/site-packages/sympy/core/function.pyc in __new__(cls, *args, **options)
596 args = map(sympify, args)
597 result = super(AppliedUndef, cls).__new__(cls, *args, **options)
--> 598 result.nargs = len(args)
599 return result
600
AttributeError: 'Float' object has no attribute 'nargs'
|
AttributeError
|
def factorial_notation(tokens, local_dict, global_dict):
"""Allows standard notation for factorial."""
result = []
nfactorial = 0
for toknum, tokval in tokens:
if toknum == ERRORTOKEN:
op = tokval
if op == "!":
nfactorial += 1
else:
nfactorial = 0
result.append((OP, op))
else:
if nfactorial == 1:
result = _add_factorial_tokens("factorial", result)
elif nfactorial == 2:
result = _add_factorial_tokens("factorial2", result)
elif nfactorial > 2:
raise TokenError
nfactorial = 0
result.append((toknum, tokval))
return result
|
def factorial_notation(tokens, local_dict, global_dict):
"""Allows standard notation for factorial."""
result = []
prevtoken = ""
for toknum, tokval in tokens:
if toknum == OP:
op = tokval
if op == "!!":
if prevtoken == "!" or prevtoken == "!!":
raise TokenError
result = _add_factorial_tokens("factorial2", result)
elif op == "!":
if prevtoken == "!" or prevtoken == "!!":
raise TokenError
result = _add_factorial_tokens("factorial", result)
else:
result.append((OP, op))
else:
result.append((toknum, tokval))
prevtoken = tokval
return result
|
https://github.com/sympy/sympy/issues/4862
|
In [37]: S("α")
--------------------------------------------------------------------------
-
SympifyError Traceback (most recent call last)
/Users/aaronmeurer/Documents/Python/sympy/sympy/<ipython console> in <module>()
/Users/aaronmeurer/Documents/Python/sympy/sympy/sympy/core/sympify.pyc in sympify(a,
locals, convert_xor)
114 a = a.replace('^','**')
115 import ast_parser
--> 116 return ast_parser.parse_expr(a, locals)
117 raise SympifyError("%r is NOT a valid SymPy expression" % a)
118
/Users/aaronmeurer/Documents/Python/sympy/sympy/sympy/core/ast_parser.pyc in
parse_expr(s, local_dict)
89 a = parse(s.strip(), mode="eval")
90 except SyntaxError:
---> 91 raise SympifyError("Cannot parse.")
92 a = Transform(local_dict, global_dict).visit(a)
93 e = compile(a, "<string>", "eval")
SympifyError: SympifyError: Cannot parse.
sympify() should be able to parse greek letters, as they are pretty printed for symbols of that
name:
In [44]: alpha = Symbol('alpha')
In [45]: alpha
Out[45]: α
|
SympifyError
|
def auto_number(tokens, local_dict, global_dict):
"""
Converts numeric literals to use SymPy equivalents.
Complex numbers use ``I``, integer literals use ``Integer``, and float
literals use ``Float``.
"""
result = []
for toknum, tokval in tokens:
if toknum == NUMBER:
number = tokval
postfix = []
if number.endswith("j") or number.endswith("J"):
number = number[:-1]
postfix = [(OP, "*"), (NAME, "I")]
if "." in number or (
("e" in number or "E" in number)
and not (number.startswith("0x") or number.startswith("0X"))
):
seq = [
(NAME, "Float"),
(OP, "("),
(NUMBER, repr(str(number))),
(OP, ")"),
]
else:
seq = [(NAME, "Integer"), (OP, "("), (NUMBER, number), (OP, ")")]
result.extend(seq + postfix)
else:
result.append((toknum, tokval))
return result
|
def auto_number(tokens, local_dict, global_dict):
"""Converts numeric literals to use SymPy equivalents.
Complex numbers use ``I``; integer literals use ``Integer``, float
literals use ``Float``, and repeating decimals use ``Rational``.
"""
result = []
prevtoken = ""
for toknum, tokval in tokens:
if toknum == NUMBER:
number = tokval
postfix = []
if number.endswith("j") or number.endswith("J"):
number = number[:-1]
postfix = [(OP, "*"), (NAME, "I")]
if "." in number or (
("e" in number or "E" in number)
and not (number.startswith("0x") or number.startswith("0X"))
):
match = _re_repeated.match(number)
if match is not None:
# Clear repeating decimals, e.g. 3.4[31] -> (3 + 4/10 + 31/990)
pre, post, repetend = match.groups()
zeros = "0" * len(post)
post, repetends = [w.lstrip("0") for w in [post, repetend]]
# or else interpreted as octal
a = pre or "0"
b, c = post or "0", "1" + zeros
d, e = repetends, ("9" * len(repetend)) + zeros
seq = [
(OP, "("),
(NAME, "Integer"),
(OP, "("),
(NUMBER, a),
(OP, ")"),
(OP, "+"),
(NAME, "Rational"),
(OP, "("),
(NUMBER, b),
(OP, ","),
(NUMBER, c),
(OP, ")"),
(OP, "+"),
(NAME, "Rational"),
(OP, "("),
(NUMBER, d),
(OP, ","),
(NUMBER, e),
(OP, ")"),
(OP, ")"),
]
else:
seq = [
(NAME, "Float"),
(OP, "("),
(NUMBER, repr(str(number))),
(OP, ")"),
]
else:
seq = [(NAME, "Integer"), (OP, "("), (NUMBER, number), (OP, ")")]
result.extend(seq + postfix)
else:
result.append((toknum, tokval))
return result
|
https://github.com/sympy/sympy/issues/4862
|
In [37]: S("α")
--------------------------------------------------------------------------
-
SympifyError Traceback (most recent call last)
/Users/aaronmeurer/Documents/Python/sympy/sympy/<ipython console> in <module>()
/Users/aaronmeurer/Documents/Python/sympy/sympy/sympy/core/sympify.pyc in sympify(a,
locals, convert_xor)
114 a = a.replace('^','**')
115 import ast_parser
--> 116 return ast_parser.parse_expr(a, locals)
117 raise SympifyError("%r is NOT a valid SymPy expression" % a)
118
/Users/aaronmeurer/Documents/Python/sympy/sympy/sympy/core/ast_parser.pyc in
parse_expr(s, local_dict)
89 a = parse(s.strip(), mode="eval")
90 except SyntaxError:
---> 91 raise SympifyError("Cannot parse.")
92 a = Transform(local_dict, global_dict).visit(a)
93 e = compile(a, "<string>", "eval")
SympifyError: SympifyError: Cannot parse.
sympify() should be able to parse greek letters, as they are pretty printed for symbols of that
name:
In [44]: alpha = Symbol('alpha')
In [45]: alpha
Out[45]: α
|
SympifyError
|
def poly(expr, *gens, **args):
"""
Efficiently transform an expression into a polynomial.
Examples
========
>>> from sympy import poly
>>> from sympy.abc import x
>>> poly(x*(x**2 + x - 1)**2)
Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')
"""
options.allowed_flags(args, [])
def _poly(expr, opt):
terms, poly_terms = [], []
for term in Add.make_args(expr):
factors, poly_factors = [], []
for factor in Mul.make_args(term):
if factor.is_Add:
poly_factors.append(_poly(factor, opt))
elif (
factor.is_Pow
and factor.base.is_Add
and factor.exp.is_Integer
and factor.exp >= 0
):
poly_factors.append(_poly(factor.base, opt).pow(factor.exp))
else:
factors.append(factor)
if not poly_factors:
terms.append(term)
else:
product = poly_factors[0]
for factor in poly_factors[1:]:
product = product.mul(factor)
if factors:
factor = Mul(*factors)
if factor.is_Number:
product = product.mul(factor)
else:
product = product.mul(Poly._from_expr(factor, opt))
poly_terms.append(product)
if not poly_terms:
result = Poly._from_expr(expr, opt)
else:
result = poly_terms[0]
for term in poly_terms[1:]:
result = result.add(term)
if terms:
term = Add(*terms)
if term.is_Number:
result = result.add(term)
else:
result = result.add(Poly._from_expr(term, opt))
return result.reorder(*opt.get("gens", ()), **args)
expr = sympify(expr)
if expr.is_Poly:
return Poly(expr, *gens, **args)
if "expand" not in args:
args["expand"] = False
opt = options.build_options(gens, args)
return _poly(expr, opt)
|
def poly(expr, *gens, **args):
"""
Efficiently transform an expression into a polynomial.
Examples
========
>>> from sympy import poly
>>> from sympy.abc import x
>>> poly(x*(x**2 + x - 1)**2)
Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')
"""
options.allowed_flags(args, [])
def _poly(expr, opt):
terms, poly_terms = [], []
for term in Add.make_args(expr):
factors, poly_factors = [], []
for factor in Mul.make_args(term):
if factor.is_Add:
poly_factors.append(_poly(factor, opt))
elif factor.is_Pow and factor.base.is_Add and factor.exp.is_Integer:
poly_factors.append(_poly(factor.base, opt).pow(factor.exp))
else:
factors.append(factor)
if not poly_factors:
terms.append(term)
else:
product = poly_factors[0]
for factor in poly_factors[1:]:
product = product.mul(factor)
if factors:
factor = Mul(*factors)
if factor.is_Number:
product = product.mul(factor)
else:
product = product.mul(Poly._from_expr(factor, opt))
poly_terms.append(product)
if not poly_terms:
result = Poly._from_expr(expr, opt)
else:
result = poly_terms[0]
for term in poly_terms[1:]:
result = result.add(term)
if terms:
term = Add(*terms)
if term.is_Number:
result = result.add(term)
else:
result = result.add(Poly._from_expr(term, opt))
return result.reorder(*opt.get("gens", ()), **args)
expr = sympify(expr)
if expr.is_Poly:
return Poly(expr, *gens, **args)
if "expand" not in args:
args["expand"] = False
opt = options.build_options(gens, args)
return _poly(expr, opt)
|
https://github.com/sympy/sympy/issues/12400
|
Traceback (most recent call last):
File "Temp.py", line 2, in <module>
print(sympy.polys.polytools.poly(1 / (1 + sympy.sqrt(2)), sympy.Dummy('x')))
File "sympy\polys\polytools.py", line 6855, in poly
return _poly(expr, opt)
File "sympy\polys\polytools.py", line 6805, in _poly
_poly(factor.base, opt).pow(factor.exp))
File "sympy\polys\polytools.py", line 1404, in pow
result = f.rep.pow(n)
File "sympy\polys\polyclasses.py", line 446, in pow
return f.per(dmp_pow(f.rep, n, f.lev, f.dom))
File "sympy\polys\densearith.py", line 978, in dmp_pow
return dup_pow(f, n, K)
File "sympy\polys\densearith.py", line 943, in dup_pow
raise ValueError("can't raise polynomial to a negative power")
ValueError: can't raise polynomial to a negative power
|
ValueError
|
def _poly(expr, opt):
terms, poly_terms = [], []
for term in Add.make_args(expr):
factors, poly_factors = [], []
for factor in Mul.make_args(term):
if factor.is_Add:
poly_factors.append(_poly(factor, opt))
elif (
factor.is_Pow
and factor.base.is_Add
and factor.exp.is_Integer
and factor.exp >= 0
):
poly_factors.append(_poly(factor.base, opt).pow(factor.exp))
else:
factors.append(factor)
if not poly_factors:
terms.append(term)
else:
product = poly_factors[0]
for factor in poly_factors[1:]:
product = product.mul(factor)
if factors:
factor = Mul(*factors)
if factor.is_Number:
product = product.mul(factor)
else:
product = product.mul(Poly._from_expr(factor, opt))
poly_terms.append(product)
if not poly_terms:
result = Poly._from_expr(expr, opt)
else:
result = poly_terms[0]
for term in poly_terms[1:]:
result = result.add(term)
if terms:
term = Add(*terms)
if term.is_Number:
result = result.add(term)
else:
result = result.add(Poly._from_expr(term, opt))
return result.reorder(*opt.get("gens", ()), **args)
|
def _poly(expr, opt):
terms, poly_terms = [], []
for term in Add.make_args(expr):
factors, poly_factors = [], []
for factor in Mul.make_args(term):
if factor.is_Add:
poly_factors.append(_poly(factor, opt))
elif factor.is_Pow and factor.base.is_Add and factor.exp.is_Integer:
poly_factors.append(_poly(factor.base, opt).pow(factor.exp))
else:
factors.append(factor)
if not poly_factors:
terms.append(term)
else:
product = poly_factors[0]
for factor in poly_factors[1:]:
product = product.mul(factor)
if factors:
factor = Mul(*factors)
if factor.is_Number:
product = product.mul(factor)
else:
product = product.mul(Poly._from_expr(factor, opt))
poly_terms.append(product)
if not poly_terms:
result = Poly._from_expr(expr, opt)
else:
result = poly_terms[0]
for term in poly_terms[1:]:
result = result.add(term)
if terms:
term = Add(*terms)
if term.is_Number:
result = result.add(term)
else:
result = result.add(Poly._from_expr(term, opt))
return result.reorder(*opt.get("gens", ()), **args)
|
https://github.com/sympy/sympy/issues/12400
|
Traceback (most recent call last):
File "Temp.py", line 2, in <module>
print(sympy.polys.polytools.poly(1 / (1 + sympy.sqrt(2)), sympy.Dummy('x')))
File "sympy\polys\polytools.py", line 6855, in poly
return _poly(expr, opt)
File "sympy\polys\polytools.py", line 6805, in _poly
_poly(factor.base, opt).pow(factor.exp))
File "sympy\polys\polytools.py", line 1404, in pow
result = f.rep.pow(n)
File "sympy\polys\polyclasses.py", line 446, in pow
return f.per(dmp_pow(f.rep, n, f.lev, f.dom))
File "sympy\polys\densearith.py", line 978, in dmp_pow
return dup_pow(f, n, K)
File "sympy\polys\densearith.py", line 943, in dup_pow
raise ValueError("can't raise polynomial to a negative power")
ValueError: can't raise polynomial to a negative power
|
ValueError
|
def __pow__(self, other, mod=None):
if mod is None:
return self._pow(other)
try:
_self, other, mod = as_int(self), as_int(other), as_int(mod)
if other >= 0:
return pow(_self, other, mod)
else:
from sympy.core.numbers import mod_inverse
return mod_inverse(pow(_self, -other, mod), mod)
except ValueError:
power = self._pow(other)
try:
return power % mod
except TypeError:
return NotImplemented
|
def __pow__(self, other):
return Pow(self, other)
|
https://github.com/sympy/sympy/issues/5715
|
According to http://docs.python.org/release/2.5.2/ref/numeric-types.html , __pow__ should define a third, optional argument to work with ternary pow(). We should do that for at least Integer, though it would be cool to do it for arbitrary expressions (this will require Mod from issue 5589 , and also some care to make sure that it still evaluates efficiently when values are substituted in). Right now, we get:
In [1]: pow(S(2), S(3), S(5))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/Users/aaronmeurer/Documents/python/sympy/sympy/<ipython console> in <module>()
TypeError: __sympifyit_wrapper() takes exactly 2 arguments (3 given)
|
TypeError
|
def make_args(cls, expr):
"""
Return a sequence of elements `args` such that cls(*args) == expr
>>> from sympy import Symbol, Mul, Add
>>> x, y = map(Symbol, 'xy')
>>> Mul.make_args(x*y)
(x, y)
>>> Add.make_args(x*y)
(x*y,)
>>> set(Add.make_args(x*y + y)) == set([y, x*y])
True
"""
if isinstance(expr, cls):
return expr.args
else:
return (sympify(expr),)
|
def make_args(cls, expr):
"""
Return a sequence of elements `args` such that cls(*args) == expr
>>> from sympy import Symbol, Mul, Add
>>> x, y = map(Symbol, 'xy')
>>> Mul.make_args(x*y)
(x, y)
>>> Add.make_args(x*y)
(x*y,)
>>> set(Add.make_args(x*y + y)) == set([y, x*y])
True
"""
if isinstance(expr, cls):
return expr.args
else:
return (expr,)
|
https://github.com/sympy/sympy/issues/7067
|
Following function call generate an error in Python3 but work well in Python2.
factor_list(x*(x+y))
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5675, in factor_list
return _generic_factor_list(f, gens, args, method='factor')
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5423, in _generic_factor_list
fp = _sorted_factors(fp, method)
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5322, in _sorted_factors
return sorted(factors, key=key)
TypeError: unorderable types: list() < int()
Similarly following calls generate the same error:
[1] factor_list(y*(x+y)), factor_list(x*y + y**2)
[2] factor_list(x*(x+y+z)), factor_list(x**2+x*y+x*z)
[3] factor_list((x + 1)*(x + y))
But the following don't:
[4] factor_list(x*(x**2 + y**2)), factor_list(x**3 + x*y**2)
[5] factor_list(x**3 - x), factor_list(x*(x**2 - 1))
[6] factor_list(x**2 + x*y + x + y)
[7] factor_list((x + z)*(x + y)), factor_list(x**2 + x*z + y*x + y*z)
Here [6] is mathematically same as [3]. But only [3] gives the error.
|
TypeError
|
def make_args(cls, expr):
"""
Return a set of args such that cls(*arg_set) == expr.
"""
if isinstance(expr, cls):
return expr._argset
else:
return frozenset([sympify(expr)])
|
def make_args(cls, expr):
"""
Return a sequence of elements `args` such that cls(*args) == expr
>>> from sympy import Symbol, Mul, Add
>>> x, y = map(Symbol, 'xy')
>>> Mul.make_args(x*y)
(x, y)
>>> Add.make_args(x*y)
(x*y,)
>>> set(Add.make_args(x*y + y)) == set([y, x*y])
True
"""
if isinstance(expr, cls):
return expr._argset
else:
return frozenset([expr])
|
https://github.com/sympy/sympy/issues/7067
|
Following function call generate an error in Python3 but work well in Python2.
factor_list(x*(x+y))
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5675, in factor_list
return _generic_factor_list(f, gens, args, method='factor')
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5423, in _generic_factor_list
fp = _sorted_factors(fp, method)
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5322, in _sorted_factors
return sorted(factors, key=key)
TypeError: unorderable types: list() < int()
Similarly following calls generate the same error:
[1] factor_list(y*(x+y)), factor_list(x*y + y**2)
[2] factor_list(x*(x+y+z)), factor_list(x**2+x*y+x*z)
[3] factor_list((x + 1)*(x + y))
But the following don't:
[4] factor_list(x*(x**2 + y**2)), factor_list(x**3 + x*y**2)
[5] factor_list(x**3 - x), factor_list(x*(x**2 - 1))
[6] factor_list(x**2 + x*y + x + y)
[7] factor_list((x + z)*(x + y)), factor_list(x**2 + x*z + y*x + y*z)
Here [6] is mathematically same as [3]. But only [3] gives the error.
|
TypeError
|
def _sorted_factors(factors, method):
"""Sort a list of ``(expr, exp)`` pairs."""
if method == "sqf":
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (exp, len(rep), len(poly.gens), rep)
else:
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), len(poly.gens), exp, rep)
return sorted(factors, key=key)
|
def _sorted_factors(factors, method):
"""Sort a list of ``(expr, exp)`` pairs."""
if method == "sqf":
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (exp, len(rep), rep)
else:
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), exp, rep)
return sorted(factors, key=key)
|
https://github.com/sympy/sympy/issues/7067
|
Following function call generate an error in Python3 but work well in Python2.
factor_list(x*(x+y))
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5675, in factor_list
return _generic_factor_list(f, gens, args, method='factor')
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5423, in _generic_factor_list
fp = _sorted_factors(fp, method)
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5322, in _sorted_factors
return sorted(factors, key=key)
TypeError: unorderable types: list() < int()
Similarly following calls generate the same error:
[1] factor_list(y*(x+y)), factor_list(x*y + y**2)
[2] factor_list(x*(x+y+z)), factor_list(x**2+x*y+x*z)
[3] factor_list((x + 1)*(x + y))
But the following don't:
[4] factor_list(x*(x**2 + y**2)), factor_list(x**3 + x*y**2)
[5] factor_list(x**3 - x), factor_list(x*(x**2 - 1))
[6] factor_list(x**2 + x*y + x + y)
[7] factor_list((x + z)*(x + y)), factor_list(x**2 + x*z + y*x + y*z)
Here [6] is mathematically same as [3]. But only [3] gives the error.
|
TypeError
|
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), len(poly.gens), exp, rep)
|
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), exp, rep)
|
https://github.com/sympy/sympy/issues/7067
|
Following function call generate an error in Python3 but work well in Python2.
factor_list(x*(x+y))
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5675, in factor_list
return _generic_factor_list(f, gens, args, method='factor')
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5423, in _generic_factor_list
fp = _sorted_factors(fp, method)
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5322, in _sorted_factors
return sorted(factors, key=key)
TypeError: unorderable types: list() < int()
Similarly following calls generate the same error:
[1] factor_list(y*(x+y)), factor_list(x*y + y**2)
[2] factor_list(x*(x+y+z)), factor_list(x**2+x*y+x*z)
[3] factor_list((x + 1)*(x + y))
But the following don't:
[4] factor_list(x*(x**2 + y**2)), factor_list(x**3 + x*y**2)
[5] factor_list(x**3 - x), factor_list(x*(x**2 - 1))
[6] factor_list(x**2 + x*y + x + y)
[7] factor_list((x + z)*(x + y)), factor_list(x**2 + x*z + y*x + y*z)
Here [6] is mathematically same as [3]. But only [3] gives the error.
|
TypeError
|
def _eval_imageset(self, f):
expr = f.expr
if not isinstance(expr, Expr):
return
if len(f.variables) > 1:
return
n = f.variables[0]
# f(x) + c and f(-x) + c cover the same integers
# so choose the form that has the fewest negatives
c = f(0)
fx = f(n) - c
f_x = f(-n) - c
neg_count = lambda e: sum(_coeff_isneg(_) for _ in Add.make_args(e))
if neg_count(f_x) < neg_count(fx):
expr = f_x + c
a = Wild("a", exclude=[n])
b = Wild("b", exclude=[n])
match = expr.match(a * n + b)
if match and match[a]:
# canonical shift
expr = match[a] * n + match[b] % match[a]
if expr != f.expr:
return ImageSet(Lambda(n, expr), S.Integers)
|
def _eval_imageset(self, f):
expr = f.expr
if len(f.variables) > 1:
return
n = f.variables[0]
a = Wild("a")
b = Wild("b")
match = expr.match(a * n + b)
if match[a].is_negative:
expr = -expr
match = expr.match(a * n + b)
if match[a] is S.One and match[b].is_integer:
expr = expr - match[b]
return ImageSet(Lambda(n, expr), S.Integers)
|
https://github.com/sympy/sympy/issues/7067
|
Following function call generate an error in Python3 but work well in Python2.
factor_list(x*(x+y))
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5675, in factor_list
return _generic_factor_list(f, gens, args, method='factor')
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5423, in _generic_factor_list
fp = _sorted_factors(fp, method)
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5322, in _sorted_factors
return sorted(factors, key=key)
TypeError: unorderable types: list() < int()
Similarly following calls generate the same error:
[1] factor_list(y*(x+y)), factor_list(x*y + y**2)
[2] factor_list(x*(x+y+z)), factor_list(x**2+x*y+x*z)
[3] factor_list((x + 1)*(x + y))
But the following don't:
[4] factor_list(x*(x**2 + y**2)), factor_list(x**3 + x*y**2)
[5] factor_list(x**3 - x), factor_list(x*(x**2 - 1))
[6] factor_list(x**2 + x*y + x + y)
[7] factor_list((x + z)*(x + y)), factor_list(x**2 + x*z + y*x + y*z)
Here [6] is mathematically same as [3]. But only [3] gives the error.
|
TypeError
|
def _contains(self, other):
from sympy.matrices import Matrix
from sympy.solvers.solveset import solveset, linsolve
from sympy.utilities.iterables import is_sequence, iterable, cartes
L = self.lamda
if is_sequence(other):
if not is_sequence(L.expr):
return S.false
if len(L.expr) != len(other):
raise ValueError(
filldedent("""
Dimensions of other and output of Lambda are different.""")
)
elif iterable(other):
raise ValueError(
filldedent("""
`other` should be an ordered object like a Tuple.""")
)
solns = None
if self._is_multivariate():
if not is_sequence(L.expr):
# exprs -> (numer, denom) and check again
# XXX this is a bad idea -- make the user
# remap self to desired form
return other.as_numer_denom() in self.func(
Lambda(L.variables, L.expr.as_numer_denom()), self.base_set
)
eqs = [expr - val for val, expr in zip(other, L.expr)]
variables = L.variables
free = set(variables)
if all(i.is_number for i in list(Matrix(eqs).jacobian(variables))):
solns = list(
linsolve([e - val for e, val in zip(L.expr, other)], variables)
)
else:
syms = [e.free_symbols & free for e in eqs]
solns = {}
for i, (e, s, v) in enumerate(zip(eqs, syms, other)):
if not s:
if e != v:
return S.false
solns[vars[i]] = [v]
continue
elif len(s) == 1:
sy = s.pop()
sol = solveset(e, sy)
if sol is S.EmptySet:
return S.false
elif isinstance(sol, FiniteSet):
solns[sy] = list(sol)
else:
raise NotImplementedError
else:
raise NotImplementedError
solns = cartes(*[solns[s] for s in variables])
else:
x = L.variables[0]
if isinstance(L.expr, Expr):
# scalar -> scalar mapping
solnsSet = solveset(L.expr - other, x)
if solnsSet.is_FiniteSet:
solns = list(solnsSet)
else:
msgset = solnsSet
else:
# scalar -> vector
for e, o in zip(L.expr, other):
solns = solveset(e - o, x)
if solns is S.EmptySet:
return S.false
for soln in solns:
try:
if soln in self.base_set:
break # check next pair
except TypeError:
if self.base_set.contains(soln.evalf()):
break
else:
return S.false # never broke so there was no True
return S.true
if solns is None:
raise NotImplementedError(
filldedent(
"""
Determining whether %s contains %s has not
been implemented."""
% (msgset, other)
)
)
for soln in solns:
try:
if soln in self.base_set:
return S.true
except TypeError:
return self.base_set.contains(soln.evalf())
return S.false
|
def _contains(self, other):
from sympy.matrices import Matrix
from sympy.solvers.solveset import solveset, linsolve
from sympy.utilities.iterables import iterable, cartes
L = self.lamda
if self._is_multivariate():
if not iterable(L.expr):
if iterable(other):
return S.false
return other.as_numer_denom() in self.func(
Lambda(L.variables, L.expr.as_numer_denom()), self.base_set
)
if len(L.expr) != len(self.lamda.variables):
raise NotImplementedError(
filldedent("""
Dimensions of input and output of Lambda are different.""")
)
eqs = [expr - val for val, expr in zip(other, L.expr)]
variables = L.variables
free = set(variables)
if all(i.is_number for i in list(Matrix(eqs).jacobian(variables))):
solns = list(
linsolve([e - val for e, val in zip(L.expr, other)], variables)
)
else:
syms = [e.free_symbols & free for e in eqs]
solns = {}
for i, (e, s, v) in enumerate(zip(eqs, syms, other)):
if not s:
if e != v:
return S.false
solns[vars[i]] = [v]
continue
elif len(s) == 1:
sy = s.pop()
sol = solveset(e, sy)
if sol is S.EmptySet:
return S.false
elif isinstance(sol, FiniteSet):
solns[sy] = list(sol)
else:
raise NotImplementedError
else:
raise NotImplementedError
solns = cartes(*[solns[s] for s in variables])
else:
# assume scalar -> scalar mapping
solnsSet = solveset(L.expr - other, L.variables[0])
if solnsSet.is_FiniteSet:
solns = list(solnsSet)
else:
raise NotImplementedError(
filldedent(
"""
Determining whether an ImageSet contains %s has not
been implemented."""
% func_name(other)
)
)
for soln in solns:
try:
if soln in self.base_set:
return S.true
except TypeError:
return self.base_set.contains(soln.evalf())
return S.false
|
https://github.com/sympy/sympy/issues/7067
|
Following function call generate an error in Python3 but work well in Python2.
factor_list(x*(x+y))
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5675, in factor_list
return _generic_factor_list(f, gens, args, method='factor')
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5423, in _generic_factor_list
fp = _sorted_factors(fp, method)
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5322, in _sorted_factors
return sorted(factors, key=key)
TypeError: unorderable types: list() < int()
Similarly following calls generate the same error:
[1] factor_list(y*(x+y)), factor_list(x*y + y**2)
[2] factor_list(x*(x+y+z)), factor_list(x**2+x*y+x*z)
[3] factor_list((x + 1)*(x + y))
But the following don't:
[4] factor_list(x*(x**2 + y**2)), factor_list(x**3 + x*y**2)
[5] factor_list(x**3 - x), factor_list(x*(x**2 - 1))
[6] factor_list(x**2 + x*y + x + y)
[7] factor_list((x + z)*(x + y)), factor_list(x**2 + x*z + y*x + y*z)
Here [6] is mathematically same as [3]. But only [3] gives the error.
|
TypeError
|
def _intersect(self, other):
from sympy.solvers.diophantine import diophantine
if self.base_set is S.Integers:
g = None
if isinstance(other, ImageSet) and other.base_set is S.Integers:
g = other.lamda.expr
m = other.lamda.variables[0]
elif other is S.Integers:
m = g = Dummy("x")
if g is not None:
f = self.lamda.expr
n = self.lamda.variables[0]
# Diophantine sorts the solutions according to the alphabetic
# order of the variable names, since the result should not depend
# on the variable name, they are replaced by the dummy variables
# below
a, b = Dummy("a"), Dummy("b")
f, g = f.subs(n, a), g.subs(m, b)
solns_set = diophantine(f - g)
if solns_set == set():
return EmptySet()
solns = list(diophantine(f - g))
if len(solns) != 1:
return
# since 'a' < 'b', select soln for n
nsol = solns[0][0]
t = nsol.free_symbols.pop()
return imageset(Lambda(n, f.subs(a, nsol.subs(t, n))), S.Integers)
if other == S.Reals:
from sympy.solvers.solveset import solveset_real
from sympy.core.function import expand_complex
if len(self.lamda.variables) > 1:
return None
f = self.lamda.expr
n = self.lamda.variables[0]
n_ = Dummy(n.name, real=True)
f_ = f.subs(n, n_)
re, im = f_.as_real_imag()
im = expand_complex(im)
return imageset(Lambda(n_, re), self.base_set.intersect(solveset_real(im, n_)))
|
def _intersect(self, other):
from sympy.solvers.diophantine import diophantine
if self.base_set is S.Integers:
if isinstance(other, ImageSet) and other.base_set is S.Integers:
f, g = self.lamda.expr, other.lamda.expr
n, m = self.lamda.variables[0], other.lamda.variables[0]
# Diophantine sorts the solutions according to the alphabetic
# order of the variable names, since the result should not depend
# on the variable name, they are replaced by the dummy variables
# below
a, b = Dummy("a"), Dummy("b")
f, g = f.subs(n, a), g.subs(m, b)
solns_set = diophantine(f - g)
if solns_set == set():
return EmptySet()
solns = list(diophantine(f - g))
if len(solns) == 1:
t = list(solns[0][0].free_symbols)[0]
else:
return None
# since 'a' < 'b'
return imageset(Lambda(t, f.subs(a, solns[0][0])), S.Integers)
if other == S.Reals:
from sympy.solvers.solveset import solveset_real
from sympy.core.function import expand_complex
if len(self.lamda.variables) > 1:
return None
f = self.lamda.expr
n = self.lamda.variables[0]
n_ = Dummy(n.name, real=True)
f_ = f.subs(n, n_)
re, im = f_.as_real_imag()
im = expand_complex(im)
return imageset(Lambda(n_, re), self.base_set.intersect(solveset_real(im, n_)))
|
https://github.com/sympy/sympy/issues/7067
|
Following function call generate an error in Python3 but work well in Python2.
factor_list(x*(x+y))
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5675, in factor_list
return _generic_factor_list(f, gens, args, method='factor')
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5423, in _generic_factor_list
fp = _sorted_factors(fp, method)
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5322, in _sorted_factors
return sorted(factors, key=key)
TypeError: unorderable types: list() < int()
Similarly following calls generate the same error:
[1] factor_list(y*(x+y)), factor_list(x*y + y**2)
[2] factor_list(x*(x+y+z)), factor_list(x**2+x*y+x*z)
[3] factor_list((x + 1)*(x + y))
But the following don't:
[4] factor_list(x*(x**2 + y**2)), factor_list(x**3 + x*y**2)
[5] factor_list(x**3 - x), factor_list(x*(x**2 - 1))
[6] factor_list(x**2 + x*y + x + y)
[7] factor_list((x + z)*(x + y)), factor_list(x**2 + x*z + y*x + y*z)
Here [6] is mathematically same as [3]. But only [3] gives the error.
|
TypeError
|
def _intersect(self, other):
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.complexes import sign
if other is S.Naturals:
return self._intersect(Interval(1, S.Infinity))
if other is S.Integers:
return self
if other.is_Interval:
if not all(i.is_number for i in other.args[:2]):
return
# trim down to self's size, and represent
# as a Range with step 1
start = ceiling(max(other.inf, self.inf))
if start not in other:
start += 1
end = floor(min(other.sup, self.sup))
if end not in other:
end -= 1
return self.intersect(Range(start, end + 1))
if isinstance(other, Range):
from sympy.solvers.diophantine import diop_linear
from sympy.core.numbers import ilcm
# non-overlap quick exits
if not other:
return S.EmptySet
if not self:
return S.EmptySet
if other.sup < self.inf:
return S.EmptySet
if other.inf > self.sup:
return S.EmptySet
# work with finite end at the start
r1 = self
if r1.start.is_infinite:
r1 = r1.reversed
r2 = other
if r2.start.is_infinite:
r2 = r2.reversed
# this equation represents the values of the Range;
# it's a linear equation
eq = lambda r, i: r.start + i * r.step
# we want to know when the two equations might
# have integer solutions so we use the diophantine
# solver
a, b = diop_linear(eq(r1, Dummy()) - eq(r2, Dummy()))
# check for no solution
no_solution = a is None and b is None
if no_solution:
return S.EmptySet
# there is a solution
# -------------------
# find the coincident point, c
a0 = a.as_coeff_Add()[0]
c = eq(r1, a0)
# find the first point, if possible, in each range
# since c may not be that point
def _first_finite_point(r1, c):
if c == r1.start:
return c
# st is the signed step we need to take to
# get from c to r1.start
st = sign(r1.start - c) * step
# use Range to calculate the first point:
# we want to get as close as possible to
# r1.start; the Range will not be null since
# it will at least contain c
s1 = Range(c, r1.start + st, st)[-1]
if s1 == r1.start:
pass
else:
# if we didn't hit r1.start then, if the
# sign of st didn't match the sign of r1.step
# we are off by one and s1 is not in r1
if sign(r1.step) != sign(st):
s1 -= st
if s1 not in r1:
return
return s1
# calculate the step size of the new Range
step = abs(ilcm(r1.step, r2.step))
s1 = _first_finite_point(r1, c)
if s1 is None:
return S.EmptySet
s2 = _first_finite_point(r2, c)
if s2 is None:
return S.EmptySet
# replace the corresponding start or stop in
# the original Ranges with these points; the
# result must have at least one point since
# we know that s1 and s2 are in the Ranges
def _updated_range(r, first):
st = sign(r.step) * step
if r.start.is_finite:
rv = Range(first, r.stop, st)
else:
rv = Range(r.start, first + st, st)
return rv
r1 = _updated_range(self, s1)
r2 = _updated_range(other, s2)
# work with them both in the increasing direction
if sign(r1.step) < 0:
r1 = r1.reversed
if sign(r2.step) < 0:
r2 = r2.reversed
# return clipped Range with positive step; it
# can't be empty at this point
start = max(r1.start, r2.start)
stop = min(r1.stop, r2.stop)
return Range(start, stop, step)
else:
return
|
def _intersect(self, other):
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.complexes import sign
if other is S.Naturals:
return self._intersect(Interval(1, S.Infinity))
if other is S.Integers:
return self
if other.is_Interval:
if not all(i.is_number for i in other.args[:2]):
return
o = other.intersect(Interval(self.inf, self.sup))
if o is S.EmptySet:
return o
# get inf/sup and handle below
if isinstance(o, FiniteSet):
assert len(o) == 1
inf = sup = list(o)[0]
else:
assert isinstance(o, Interval)
sup = o.sup
inf = o.inf
# get onto sequence
step = abs(self.step)
ref = self.start if self.start.is_finite else self.stop
a = ref + ceiling((inf - ref) / step) * step
if a not in other:
a += step
b = ref + floor((sup - ref) / step) * step
if b not in other:
b -= step
if self.step < 0:
a, b = b, a
# make sure to include end point
b += self.step
rv = Range(a, b, self.step)
if not rv:
return S.EmptySet
return rv
elif isinstance(other, Range):
from sympy.solvers.diophantine import diop_linear
from sympy.core.numbers import ilcm
# non-overlap quick exits
if not other:
return S.EmptySet
if not self:
return S.EmptySet
if other.sup < self.inf:
return S.EmptySet
if other.inf > self.sup:
return S.EmptySet
# work with finite end at the start
r1 = self
if r1.start.is_infinite:
r1 = r1.reversed
r2 = other
if r2.start.is_infinite:
r2 = r2.reversed
# this equation represents the values of the Range;
# it's a linear equation
eq = lambda r, i: r.start + i * r.step
# we want to know when the two equations might
# have integer solutions so we use the diophantine
# solver
a, b = diop_linear(eq(r1, Dummy()) - eq(r2, Dummy()))
# check for no solution
no_solution = a is None and b is None
if no_solution:
return S.EmptySet
# there is a solution
# -------------------
# find the coincident point, c
a0 = a.as_coeff_Add()[0]
c = eq(r1, a0)
# find the first point, if possible, in each range
# since c may not be that point
def _first_finite_point(r1, c):
if c == r1.start:
return c
# st is the signed step we need to take to
# get from c to r1.start
st = sign(r1.start - c) * step
# use Range to calculate the first point:
# we want to get as close as possible to
# r1.start; the Range will not be null since
# it will at least contain c
s1 = Range(c, r1.start + st, st)[-1]
if s1 == r1.start:
pass
else:
# if we didn't hit r1.start then, if the
# sign of st didn't match the sign of r1.step
# we are off by one and s1 is not in r1
if sign(r1.step) != sign(st):
s1 -= st
if s1 not in r1:
return
return s1
# calculate the step size of the new Range
step = abs(ilcm(r1.step, r2.step))
s1 = _first_finite_point(r1, c)
if s1 is None:
return S.EmptySet
s2 = _first_finite_point(r2, c)
if s2 is None:
return S.EmptySet
# replace the corresponding start or stop in
# the original Ranges with these points; the
# result must have at least one point since
# we know that s1 and s2 are in the Ranges
def _updated_range(r, first):
st = sign(r.step) * step
if r.start.is_finite:
rv = Range(first, r.stop, st)
else:
rv = Range(r.start, first + st, st)
return rv
r1 = _updated_range(self, s1)
r2 = _updated_range(other, s2)
# work with them both in the increasing direction
if sign(r1.step) < 0:
r1 = r1.reversed
if sign(r2.step) < 0:
r2 = r2.reversed
# return clipped Range with positive step; it
# can't be empty at this point
start = max(r1.start, r2.start)
stop = min(r1.stop, r2.stop)
return Range(start, stop, step)
else:
return
|
https://github.com/sympy/sympy/issues/7067
|
Following function call generate an error in Python3 but work well in Python2.
factor_list(x*(x+y))
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5675, in factor_list
return _generic_factor_list(f, gens, args, method='factor')
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5423, in _generic_factor_list
fp = _sorted_factors(fp, method)
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5322, in _sorted_factors
return sorted(factors, key=key)
TypeError: unorderable types: list() < int()
Similarly following calls generate the same error:
[1] factor_list(y*(x+y)), factor_list(x*y + y**2)
[2] factor_list(x*(x+y+z)), factor_list(x**2+x*y+x*z)
[3] factor_list((x + 1)*(x + y))
But the following don't:
[4] factor_list(x*(x**2 + y**2)), factor_list(x**3 + x*y**2)
[5] factor_list(x**3 - x), factor_list(x*(x**2 - 1))
[6] factor_list(x**2 + x*y + x + y)
[7] factor_list((x + z)*(x + y)), factor_list(x**2 + x*z + y*x + y*z)
Here [6] is mathematically same as [3]. But only [3] gives the error.
|
TypeError
|
def imageset(*args):
r"""
Return an image of the set under transformation ``f``.
If this function can't compute the image, it returns an
unevaluated ImageSet object.
.. math::
{ f(x) | x \in self }
Examples
========
>>> from sympy import S, Interval, Symbol, imageset, sin, Lambda
>>> from sympy.abc import x, y
>>> imageset(x, 2*x, Interval(0, 2))
[0, 4]
>>> imageset(lambda x: 2*x, Interval(0, 2))
[0, 4]
>>> imageset(Lambda(x, sin(x)), Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), [-2, 1])
>>> imageset(sin, Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), [-2, 1])
>>> imageset(lambda y: x + y, Interval(-2, 1))
ImageSet(Lambda(_x, _x + x), [-2, 1])
Expressions applied to the set of Integers are simplified
to show as few negatives as possible and linear expressions
are converted to a canonical form. If this is not desirable
then the unevaluated ImageSet should be used.
>>> imageset(x, -2*x + 5, S.Integers)
ImageSet(Lambda(x, 2*x + 1), Integers())
See Also
========
sympy.sets.fancysets.ImageSet
"""
from sympy.core import Lambda
from sympy.sets.fancysets import ImageSet
from sympy.geometry.util import _uniquely_named_symbol
if len(args) not in (2, 3):
raise ValueError("imageset expects 2 or 3 args, got: %s" % len(args))
set = args[-1]
if not isinstance(set, Set):
name = func_name(set)
raise ValueError("last argument should be a set, not %s" % name)
if len(args) == 3:
f = Lambda(*args[:2])
elif len(args) == 2:
f = args[0]
if isinstance(f, Lambda):
pass
elif (
isinstance(f, FunctionClass) # like cos
or func_name(f) == "<lambda>"
):
var = _uniquely_named_symbol(Symbol("x"), f(Dummy()))
expr = f(var)
f = Lambda(var, expr)
else:
raise TypeError(
filldedent(
"""
expecting lambda, Lambda, or FunctionClass, not \'%s\'"""
% func_name(f)
)
)
r = set._eval_imageset(f)
if isinstance(r, ImageSet):
f, set = r.args
if f.variables[0] == f.expr:
return set
if isinstance(set, ImageSet):
if len(set.lamda.variables) == 1 and len(f.variables) == 1:
return imageset(
Lambda(
set.lamda.variables[0], f.expr.subs(f.variables[0], set.lamda.expr)
),
set.base_set,
)
if r is not None:
return r
return ImageSet(f, set)
|
def imageset(*args):
r"""
Return an image of the set under transformation ``f``.
If this function can't compute the image, it returns an
unevaluated ImageSet object.
.. math::
{ f(x) | x \in self }
Examples
========
>>> from sympy import Interval, Symbol, imageset, sin, Lambda
>>> from sympy.abc import x, y
>>> imageset(x, 2*x, Interval(0, 2))
[0, 4]
>>> imageset(lambda x: 2*x, Interval(0, 2))
[0, 4]
>>> imageset(Lambda(x, sin(x)), Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), [-2, 1])
>>> imageset(sin, Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), [-2, 1])
>>> imageset(lambda y: x + y, Interval(-2, 1))
ImageSet(Lambda(_x, _x + x), [-2, 1])
See Also
========
sympy.sets.fancysets.ImageSet
"""
from sympy.core import Lambda
from sympy.sets.fancysets import ImageSet
from sympy.geometry.util import _uniquely_named_symbol
if len(args) not in (2, 3):
raise ValueError("imageset expects 2 or 3 args, got: %s" % len(args))
set = args[-1]
if not isinstance(set, Set):
name = func_name(set)
raise ValueError("last argument should be a set, not %s" % name)
if len(args) == 3:
f = Lambda(*args[:2])
elif len(args) == 2:
f = args[0]
if isinstance(f, Lambda):
pass
elif (
isinstance(f, FunctionClass) # like cos
or func_name(f) == "<lambda>"
):
var = _uniquely_named_symbol(Symbol("x"), f(Dummy()))
expr = f(var)
f = Lambda(var, expr)
else:
raise TypeError(
filldedent(
"""
expecting lambda, Lambda, or FunctionClass, not \'%s\'"""
% func_name(f)
)
)
r = set._eval_imageset(f)
if isinstance(r, ImageSet):
f, set = r.args
if f.variables[0] == f.expr:
return set
if isinstance(set, ImageSet):
if len(set.lamda.variables) == 1 and len(f.variables) == 1:
return imageset(
Lambda(
set.lamda.variables[0], f.expr.subs(f.variables[0], set.lamda.expr)
),
set.base_set,
)
if r is not None:
return r
return ImageSet(f, set)
|
https://github.com/sympy/sympy/issues/7067
|
Following function call generate an error in Python3 but work well in Python2.
factor_list(x*(x+y))
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5675, in factor_list
return _generic_factor_list(f, gens, args, method='factor')
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5423, in _generic_factor_list
fp = _sorted_factors(fp, method)
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5322, in _sorted_factors
return sorted(factors, key=key)
TypeError: unorderable types: list() < int()
Similarly following calls generate the same error:
[1] factor_list(y*(x+y)), factor_list(x*y + y**2)
[2] factor_list(x*(x+y+z)), factor_list(x**2+x*y+x*z)
[3] factor_list((x + 1)*(x + y))
But the following don't:
[4] factor_list(x*(x**2 + y**2)), factor_list(x**3 + x*y**2)
[5] factor_list(x**3 - x), factor_list(x*(x**2 - 1))
[6] factor_list(x**2 + x*y + x + y)
[7] factor_list((x + z)*(x + y)), factor_list(x**2 + x*z + y*x + y*z)
Here [6] is mathematically same as [3]. But only [3] gives the error.
|
TypeError
|
def diophantine(eq, param=symbols("t", integer=True)):
"""
Simplify the solution procedure of diophantine equation ``eq`` by
converting it into a product of terms which should equal zero.
For example, when solving, `x^2 - y^2 = 0` this is treated as
`(x + y)(x - y) = 0` and `x+y = 0` and `x-y = 0` are solved independently
and combined. Each term is solved by calling ``diop_solve()``.
Output of ``diophantine()`` is a set of tuples. Each tuple represents a
solution of the input equation. In a tuple, solution for each variable is
listed according to the alphabetic order of input variables. i.e. if we have
an equation with two variables `a` and `b`, first element of the tuple will
give the solution for `a` and the second element will give the solution for
`b`.
Usage
=====
``diophantine(eq, t)``: Solve the diophantine equation ``eq``.
``t`` is the parameter to be used by ``diop_solve()``.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine import diophantine
>>> from sympy.abc import x, y, z
>>> diophantine(x**2 - y**2)
set([(-t_0, -t_0), (t_0, -t_0)])
#>>> diophantine(x*(2*x + 3*y - z))
#set([(0, n1, n2), (3*t - z, -2*t + z, z)])
#>>> diophantine(x**2 + 3*x*y + 4*x)
#set([(0, n1), (3*t - 4, -t)])
See Also
========
diop_solve()
"""
if isinstance(eq, Eq):
eq = eq.lhs - eq.rhs
eq = Poly(eq).as_expr()
if not eq.is_polynomial() or eq.is_number:
raise TypeError("Equation input format not supported")
var = list(eq.expand(force=True).free_symbols)
var.sort(key=default_sort_key)
terms = factor_list(eq)[1]
sols = set([])
for term in terms:
base = term[0]
var_t, jnk, eq_type = classify_diop(base)
if not var_t:
continue
solution = diop_solve(base, param)
if eq_type in [
"linear",
"homogeneous_ternary_quadratic",
"general_pythagorean",
]:
if merge_solution(var, var_t, solution) != ():
sols.add(merge_solution(var, var_t, solution))
elif eq_type in ["binary_quadratic", "general_sum_of_squares", "univariate"]:
for sol in solution:
if merge_solution(var, var_t, sol) != ():
sols.add(merge_solution(var, var_t, sol))
return sols
|
def diophantine(eq, param=symbols("t", integer=True)):
"""
Simplify the solution procedure of diophantine equation ``eq`` by
converting it into a product of terms which should equal zero.
For example, when solving, `x^2 - y^2 = 0` this is treated as
`(x + y)(x - y) = 0` and `x+y = 0` and `x-y = 0` are solved independently
and combined. Each term is solved by calling ``diop_solve()``.
Output of ``diophantine()`` is a set of tuples. Each tuple represents a
solution of the input equation. In a tuple, solution for each variable is
listed according to the alphabetic order of input variables. i.e. if we have
an equation with two variables `a` and `b`, first element of the tuple will
give the solution for `a` and the second element will give the solution for
`b`.
Usage
=====
``diophantine(eq, t)``: Solve the diophantine equation ``eq``.
``t`` is the parameter to be used by ``diop_solve()``.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine import diophantine
>>> from sympy.abc import x, y, z
>>> diophantine(x**2 - y**2)
set([(-t_0, -t_0), (t_0, -t_0)])
#>>> diophantine(x*(2*x + 3*y - z))
#set([(0, n1, n2), (3*t - z, -2*t + z, z)])
#>>> diophantine(x**2 + 3*x*y + 4*x)
#set([(0, n1), (3*t - 4, -t)])
See Also
========
diop_solve()
"""
if isinstance(eq, Eq):
eq = eq.lhs - eq.rhs
eq = Poly(eq).as_expr()
if not eq.is_polynomial() or eq.is_number:
raise TypeError("Equation input format not supported")
var = list(eq.expand(force=True).free_symbols)
var.sort(key=default_sort_key)
terms = factor_list(eq)[1]
sols = set([])
for term in terms:
base = term[0]
var_t, jnk, eq_type = classify_diop(base)
solution = diop_solve(base, param)
if eq_type in [
"linear",
"homogeneous_ternary_quadratic",
"general_pythagorean",
]:
if merge_solution(var, var_t, solution) != ():
sols.add(merge_solution(var, var_t, solution))
elif eq_type in ["binary_quadratic", "general_sum_of_squares", "univariate"]:
for sol in solution:
if merge_solution(var, var_t, sol) != ():
sols.add(merge_solution(var, var_t, sol))
return sols
|
https://github.com/sympy/sympy/issues/7067
|
Following function call generate an error in Python3 but work well in Python2.
factor_list(x*(x+y))
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5675, in factor_list
return _generic_factor_list(f, gens, args, method='factor')
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5423, in _generic_factor_list
fp = _sorted_factors(fp, method)
File "/home/thilinarmtb/GSoC/sympy/sympy/polys/polytools.py", line 5322, in _sorted_factors
return sorted(factors, key=key)
TypeError: unorderable types: list() < int()
Similarly following calls generate the same error:
[1] factor_list(y*(x+y)), factor_list(x*y + y**2)
[2] factor_list(x*(x+y+z)), factor_list(x**2+x*y+x*z)
[3] factor_list((x + 1)*(x + y))
But the following don't:
[4] factor_list(x*(x**2 + y**2)), factor_list(x**3 + x*y**2)
[5] factor_list(x**3 - x), factor_list(x*(x**2 - 1))
[6] factor_list(x**2 + x*y + x + y)
[7] factor_list((x + z)*(x + y)), factor_list(x**2 + x*z + y*x + y*z)
Here [6] is mathematically same as [3]. But only [3] gives the error.
|
TypeError
|
def doit(self, **hints):
if self.args[0].is_zero is False:
return self.args[0] / Abs(self.args[0])
return self
|
def doit(self):
if self.args[0].is_zero is False:
return self.args[0] / Abs(self.args[0])
return self
|
https://github.com/sympy/sympy/issues/7163
|
In [63]: integrate((sign(x - 1) - sign(x - 2))*cos(x), x)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-63-4e67770ec27d> in <module>()
----> 1 integrate((sign(x - 1) - sign(x - 2))*cos(x), x)
/home/pape/sympy/sympy/utilities/decorator.pyc in threaded_func(expr, *args, **kwargs)
33 func(expr.rhs, *args, **kwargs))
34 else:
---> 35 return func(expr, *args, **kwargs)
36
37 return threaded_func
/home/pape/sympy/sympy/integrals/integrals.pyc in integrate(*args, **kwargs)
1638 if isinstance(integral, Integral):
1639 return integral.doit(deep=False, meijerg=meijerg, conds=conds,
-> 1640 risch=risch, manual=manual)
1641 else:
1642 return integral
/home/pape/sympy/sympy/integrals/integrals.pyc in doit(self, **hints)
892 function, xab[0],
893 meijerg=meijerg1, risch=risch, manual=manual,
--> 894 conds=conds)
895 if antideriv is None and meijerg1 is True:
896 ret = try_meijerg(function, xab)
/home/pape/sympy/sympy/integrals/integrals.pyc in _eval_integral(self, f, x, meijerg, risch, manual, conds)
1298 result = result.func(*[
1299 arg.doit(manual=False) if arg.has(Integral) else arg
-> 1300 for arg in result.args
1301 ]).expand(multinomial=False,
1302 log=False,
/home/pape/sympy/sympy/integrals/integrals.pyc in doit(self, **hints)
809 function = self.function
810 if deep:
--> 811 function = function.doit(**hints)
812
813 if function.is_zero:
/home/pape/sympy/sympy/core/basic.pyc in doit(self, **hints)
1533 if hints.get('deep', True):
1534 terms = [ term.doit(**hints) if isinstance(term, Basic) else term
-> 1535 for term in self.args ]
1536 return self.func(*terms)
1537 else:
TypeError: doit() got an unexpected keyword argument 'manual'
|
TypeError
|
def __new__(cls, *args):
"""
Construct a new instance of Diagram.
If no arguments are supplied, an empty diagram is created.
If at least an argument is supplied, ``args[0]`` is
interpreted as the premises of the diagram. If ``args[0]`` is
a list, it is interpreted as a list of :class:`Morphism`'s, in
which each :class:`Morphism` has an empty set of properties.
If ``args[0]`` is a Python dictionary or a :class:`Dict`, it
is interpreted as a dictionary associating to some
:class:`Morphism`'s some properties.
If at least two arguments are supplied ``args[1]`` is
interpreted as the conclusions of the diagram. The type of
``args[1]`` is interpreted in exactly the same way as the type
of ``args[0]``. If only one argument is supplied, the diagram
has no conclusions.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import IdentityMorphism, Diagram
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> IdentityMorphism(A) in d.premises.keys()
True
>>> g * f in d.premises.keys()
True
>>> d = Diagram([f, g], {g * f: "unique"})
>>> d.conclusions[g * f]
{unique}
"""
premises = {}
conclusions = {}
# Here we will keep track of the objects which appear in the
# premises.
objects = EmptySet()
if len(args) >= 1:
# We've got some premises in the arguments.
premises_arg = args[0]
if isinstance(premises_arg, list):
# The user has supplied a list of morphisms, none of
# which have any attributes.
empty = EmptySet()
for morphism in premises_arg:
objects |= FiniteSet(morphism.domain, morphism.codomain)
Diagram._add_morphism_closure(premises, morphism, empty)
elif isinstance(premises_arg, dict) or isinstance(premises_arg, Dict):
# The user has supplied a dictionary of morphisms and
# their properties.
for morphism, props in premises_arg.items():
objects |= FiniteSet(morphism.domain, morphism.codomain)
Diagram._add_morphism_closure(
premises,
morphism,
FiniteSet(*props) if iterable(props) else FiniteSet(props),
)
if len(args) >= 2:
# We also have some conclusions.
conclusions_arg = args[1]
if isinstance(conclusions_arg, list):
# The user has supplied a list of morphisms, none of
# which have any attributes.
empty = EmptySet()
for morphism in conclusions_arg:
# Check that no new objects appear in conclusions.
if (objects.contains(morphism.domain) == S.true) and (
objects.contains(morphism.codomain) == S.true
):
# No need to add identities and recurse
# composites this time.
Diagram._add_morphism_closure(
conclusions,
morphism,
empty,
add_identities=False,
recurse_composites=False,
)
elif isinstance(conclusions_arg, dict) or isinstance(conclusions_arg, Dict):
# The user has supplied a dictionary of morphisms and
# their properties.
for morphism, props in conclusions_arg.items():
# Check that no new objects appear in conclusions.
if (morphism.domain in objects) and (morphism.codomain in objects):
# No need to add identities and recurse
# composites this time.
Diagram._add_morphism_closure(
conclusions,
morphism,
FiniteSet(*props) if iterable(props) else FiniteSet(props),
add_identities=False,
recurse_composites=False,
)
return Basic.__new__(cls, Dict(premises), Dict(conclusions), objects)
|
def __new__(cls, *args):
"""
Construct a new instance of Diagram.
If no arguments are supplied, an empty diagram is created.
If at least an argument is supplied, ``args[0]`` is
interpreted as the premises of the diagram. If ``args[0]`` is
a list, it is interpreted as a list of :class:`Morphism`'s, in
which each :class:`Morphism` has an empty set of properties.
If ``args[0]`` is a Python dictionary or a :class:`Dict`, it
is interpreted as a dictionary associating to some
:class:`Morphism`'s some properties.
If at least two arguments are supplied ``args[1]`` is
interpreted as the conclusions of the diagram. The type of
``args[1]`` is interpreted in exactly the same way as the type
of ``args[0]``. If only one argument is supplied, the diagram
has no conclusions.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import IdentityMorphism, Diagram
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> IdentityMorphism(A) in d.premises.keys()
True
>>> g * f in d.premises.keys()
True
>>> d = Diagram([f, g], {g * f: "unique"})
>>> d.conclusions[g * f]
{unique}
"""
premises = {}
conclusions = {}
# Here we will keep track of the objects which appear in the
# premises.
objects = EmptySet()
if len(args) >= 1:
# We've got some premises in the arguments.
premises_arg = args[0]
if isinstance(premises_arg, list):
# The user has supplied a list of morphisms, none of
# which have any attributes.
empty = EmptySet()
for morphism in premises_arg:
objects |= FiniteSet(morphism.domain, morphism.codomain)
Diagram._add_morphism_closure(premises, morphism, empty)
elif isinstance(premises_arg, dict) or isinstance(premises_arg, Dict):
# The user has supplied a dictionary of morphisms and
# their properties.
for morphism, props in premises_arg.items():
objects |= FiniteSet(morphism.domain, morphism.codomain)
Diagram._add_morphism_closure(
premises,
morphism,
FiniteSet(*props) if iterable(props) else FiniteSet(props),
)
if len(args) >= 2:
# We also have some conclusions.
conclusions_arg = args[1]
if isinstance(conclusions_arg, list):
# The user has supplied a list of morphisms, none of
# which have any attributes.
empty = EmptySet()
for morphism in conclusions_arg:
# Check that no new objects appear in conclusions.
if (morphism.domain in objects) and (morphism.codomain in objects):
# No need to add identities and recurse
# composites this time.
Diagram._add_morphism_closure(
conclusions,
morphism,
empty,
add_identities=False,
recurse_composites=False,
)
elif isinstance(conclusions_arg, dict) or isinstance(conclusions_arg, Dict):
# The user has supplied a dictionary of morphisms and
# their properties.
for morphism, props in conclusions_arg.items():
# Check that no new objects appear in conclusions.
if (morphism.domain in objects) and (morphism.codomain in objects):
# No need to add identities and recurse
# composites this time.
Diagram._add_morphism_closure(
conclusions,
morphism,
FiniteSet(*props) if iterable(props) else FiniteSet(props),
add_identities=False,
recurse_composites=False,
)
return Basic.__new__(cls, Dict(premises), Dict(conclusions), objects)
|
https://github.com/sympy/sympy/issues/8197
|
In [5]: b in FiniteSet(-a)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-66c50e010c80> in <module>()
----> 1 b in FiniteSet(-a)
/home/hargup/210fs/sympy/sympy/sets/sets.py in __contains__(self, other)
495 symb = self.contains(other)
496 if symb not in (true, false):
--> 497 raise TypeError('contains did not evaluate to a bool: %r' % symb)
498 return bool(symb)
499
TypeError: contains did not evaluate to a bool: Contains(b, {-a})
|
TypeError
|
def subdiagram_from_objects(self, objects):
"""
If ``objects`` is a subset of the objects of ``self``, returns
a diagram which has as premises all those premises of ``self``
which have a domains and codomains in ``objects``, likewise
for conclusions. Properties are preserved.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g], {f: "unique", g*f: "veryunique"})
>>> d1 = d.subdiagram_from_objects(FiniteSet(A, B))
>>> d1 == Diagram([f], {f: "unique"})
True
"""
if not objects.is_subset(self.objects):
raise ValueError("Supplied objects should all belong to the diagram.")
new_premises = {}
for morphism, props in self.premises.items():
if (objects.contains(morphism.domain) == S.true) and (
objects.contains(morphism.codomain) == S.true
):
new_premises[morphism] = props
new_conclusions = {}
for morphism, props in self.conclusions.items():
if (objects.contains(morphism.domain) == S.true) and (
objects.contains(morphism.codomain) == S.true
):
new_conclusions[morphism] = props
return Diagram(new_premises, new_conclusions)
|
def subdiagram_from_objects(self, objects):
"""
If ``objects`` is a subset of the objects of ``self``, returns
a diagram which has as premises all those premises of ``self``
which have a domains and codomains in ``objects``, likewise
for conclusions. Properties are preserved.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g], {f: "unique", g*f: "veryunique"})
>>> d1 = d.subdiagram_from_objects(FiniteSet(A, B))
>>> d1 == Diagram([f], {f: "unique"})
True
"""
if not objects.is_subset(self.objects):
raise ValueError("Supplied objects should all belong to the diagram.")
new_premises = {}
for morphism, props in self.premises.items():
if (morphism.domain in objects) and (morphism.codomain in objects):
new_premises[morphism] = props
new_conclusions = {}
for morphism, props in self.conclusions.items():
if (morphism.domain in objects) and (morphism.codomain in objects):
new_conclusions[morphism] = props
return Diagram(new_premises, new_conclusions)
|
https://github.com/sympy/sympy/issues/8197
|
In [5]: b in FiniteSet(-a)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-66c50e010c80> in <module>()
----> 1 b in FiniteSet(-a)
/home/hargup/210fs/sympy/sympy/sets/sets.py in __contains__(self, other)
495 symb = self.contains(other)
496 if symb not in (true, false):
--> 497 raise TypeError('contains did not evaluate to a bool: %r' % symb)
498 return bool(symb)
499
TypeError: contains did not evaluate to a bool: Contains(b, {-a})
|
TypeError
|
def _complement(self, other):
# this behaves as other - self
if isinstance(other, ProductSet):
# For each set consider it or it's complement
# We need at least one of the sets to be complemented
# Consider all 2^n combinations.
# We can conveniently represent these options easily using a ProductSet
# XXX: this doesn't work if the dimentions of the sets isn't same.
# A - B is essentially same as A if B has a different
# dimentionality than A
switch_sets = ProductSet(
FiniteSet(o, o - s) for s, o in zip(self.sets, other.sets)
)
product_sets = (ProductSet(*set) for set in switch_sets)
# Union of all combinations but this one
return Union(p for p in product_sets if p != other)
elif isinstance(other, Interval):
if isinstance(self, Interval) or isinstance(self, FiniteSet):
return Intersection(other, self.complement(S.Reals))
elif isinstance(other, Union):
return Union(o - self for o in other.args)
elif isinstance(other, Complement):
return Complement(other.args[0], Union(other.args[1], self))
elif isinstance(other, EmptySet):
return S.EmptySet
elif isinstance(other, FiniteSet):
return FiniteSet(*[el for el in other if self.contains(el) != True])
|
def _complement(self, other):
# this behaves as other - self
if isinstance(other, ProductSet):
# For each set consider it or it's complement
# We need at least one of the sets to be complemented
# Consider all 2^n combinations.
# We can conveniently represent these options easily using a ProductSet
# XXX: this doesn't work if the dimentions of the sets isn't same.
# A - B is essentially same as A if B has a different
# dimentionality than A
switch_sets = ProductSet(
FiniteSet(o, o - s) for s, o in zip(self.sets, other.sets)
)
product_sets = (ProductSet(*set) for set in switch_sets)
# Union of all combinations but this one
return Union(p for p in product_sets if p != other)
elif isinstance(other, Interval):
if isinstance(self, Interval) or isinstance(self, FiniteSet):
return Intersection(other, self.complement(S.Reals))
elif isinstance(other, Union):
return Union(o - self for o in other.args)
elif isinstance(other, Complement):
return Complement(other.args[0], Union(other.args[1], self))
elif isinstance(other, EmptySet):
return S.EmptySet
elif isinstance(other, FiniteSet):
return FiniteSet(*[el for el in other if el not in self])
return None
|
https://github.com/sympy/sympy/issues/8197
|
In [5]: b in FiniteSet(-a)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-66c50e010c80> in <module>()
----> 1 b in FiniteSet(-a)
/home/hargup/210fs/sympy/sympy/sets/sets.py in __contains__(self, other)
495 symb = self.contains(other)
496 if symb not in (true, false):
--> 497 raise TypeError('contains did not evaluate to a bool: %r' % symb)
498 return bool(symb)
499
TypeError: contains did not evaluate to a bool: Contains(b, {-a})
|
TypeError
|
def _eval_Eq(self, other):
if not other.is_FiniteSet:
if (
other.is_Union
or other.is_Complement
or other.is_Intersection
or other.is_ProductSet
):
return
return false
if len(self) != len(other):
return false
return And(*map(lambda x, y: Eq(x, y), self.args, other.args))
|
def _eval_Eq(self, other):
if not other.is_FiniteSet:
if (
other.is_Union
or other.is_Complement
or other.is_Intersection
or other.is_ProductSet
):
return
return false
return And(*map(lambda x, y: Eq(x, y), self.args, other.args))
|
https://github.com/sympy/sympy/issues/8197
|
In [5]: b in FiniteSet(-a)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-66c50e010c80> in <module>()
----> 1 b in FiniteSet(-a)
/home/hargup/210fs/sympy/sympy/sets/sets.py in __contains__(self, other)
495 symb = self.contains(other)
496 if symb not in (true, false):
--> 497 raise TypeError('contains did not evaluate to a bool: %r' % symb)
498 return bool(symb)
499
TypeError: contains did not evaluate to a bool: Contains(b, {-a})
|
TypeError
|
def _contains(self, other):
"""
Tests whether an element, other, is in the set.
Relies on Python's set class. This tests for object equality
All inputs are sympified
Examples
========
>>> from sympy import FiniteSet
>>> 1 in FiniteSet(1, 2)
True
>>> 5 in FiniteSet(1, 2)
False
"""
r = false
for e in self._elements:
t = Eq(e, other, evaluate=True)
if isinstance(t, Eq):
t = t.simplify()
if t == true:
return t
elif t != false:
r = None
return r
|
def _contains(self, other):
"""
Tests whether an element, other, is in the set.
Relies on Python's set class. This tests for object equality
All inputs are sympified
Examples
========
>>> from sympy import FiniteSet
>>> 1 in FiniteSet(1, 2)
True
>>> 5 in FiniteSet(1, 2)
False
"""
if other in self._elements:
return true
else:
if not other.free_symbols:
return false
elif all(e.is_Symbol for e in self._elements):
return false
|
https://github.com/sympy/sympy/issues/8197
|
In [5]: b in FiniteSet(-a)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-66c50e010c80> in <module>()
----> 1 b in FiniteSet(-a)
/home/hargup/210fs/sympy/sympy/sets/sets.py in __contains__(self, other)
495 symb = self.contains(other)
496 if symb not in (true, false):
--> 497 raise TypeError('contains did not evaluate to a bool: %r' % symb)
498 return bool(symb)
499
TypeError: contains did not evaluate to a bool: Contains(b, {-a})
|
TypeError
|
def __contains__(self, other):
# Split event into each subdomain
for domain in self.domains:
# Collect the parts of this event which associate to this domain
elem = frozenset(
[item for item in other if domain.symbols.contains(item[0]) == S.true]
)
# Test this sub-event
if elem not in domain:
return False
# All subevents passed
return True
|
def __contains__(self, other):
# Split event into each subdomain
for domain in self.domains:
# Collect the parts of this event which associate to this domain
elem = frozenset([item for item in other if item[0] in domain.symbols])
# Test this sub-event
if elem not in domain:
return False
# All subevents passed
return True
|
https://github.com/sympy/sympy/issues/8197
|
In [5]: b in FiniteSet(-a)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-66c50e010c80> in <module>()
----> 1 b in FiniteSet(-a)
/home/hargup/210fs/sympy/sympy/sets/sets.py in __contains__(self, other)
495 symb = self.contains(other)
496 if symb not in (true, false):
--> 497 raise TypeError('contains did not evaluate to a bool: %r' % symb)
498 return bool(symb)
499
TypeError: contains did not evaluate to a bool: Contains(b, {-a})
|
TypeError
|
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get("user_functions", {})
self.known_functions.update(userfuncs)
|
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get("user_functions", {})
for k, v in userfuncs.items():
if not isinstance(v, list):
userfuncs[k] = [(lambda *x: True, v)]
self.known_functions.update(userfuncs)
|
https://github.com/sympy/sympy/issues/6814
|
ufuncify(x, x*log(10)) produces the following exception
---------------------------------------------------------------------------
CodeWrapError Traceback (most recent call last)
<ipython-input-12-f6cc06919001> in <module>()
----> 1 ufuncify(x, x*log(10))
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in ufuncify(args, expr, **kwargs)
479 # first argument accepts an array
480 args[0] = x[i]
--> 481 return autowrap(C.Equality(y[i], f(*args)), **kwargs)
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in autowrap(expr, language, backend, tempdir, args, flags, verbose, helpers)
404 helps.append(Routine(name, expr, args))
405
--> 406 return code_wrapper.wrap_code(routine, helpers=helps)
407
408
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in wrap_code(self, routine, helpers)
133 self._generate_code(routine, helpers)
134 self._prepare_files(routine)
--> 135 self._process_files(routine)
136 mod = __import__(self.module_name)
137 finally:
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in _process_files(self, routine)
158 if retcode:
159 raise CodeWrapError(
--> 160 "Error while executing command: %s" % " ".join(command))
161
162
CodeWrapError: Error while executing command: f2py -m wrapper_module_6 -c wrapped_code_6.f90
If I replace log(2) by its numerical value ufuncify works as expected.
|
CodeWrapError
|
def ccode(expr, assign_to=None, **settings):
"""Converts an expr to a string of c code
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=15].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, cfunction_string)]. See below
for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import ccode, symbols, Rational, sin, ceiling, Abs
>>> x, tau = symbols("x, tau")
>>> ccode((2*tau)**Rational(7, 2))
'8*sqrt(2)*pow(tau, 7.0L/2.0L)'
>>> ccode(sin(x), assign_to="s")
's = sin(x);'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
cfunction_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs"),
... (lambda x: x.is_integer, "ABS")]
... }
>>> ccode(Abs(x) + ceiling(x), user_functions=custom_functions)
'fabs(x) + CEIL(x)'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(ccode(expr, tau))
if (x > 0) {
tau = x + 1;
}
else {
tau = x;
}
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> ccode(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(ccode(mat, A))
A[0][0] = pow(x, 2);
if (x > 0) {
A[1][0] = x + 1;
}
else {
A[1][0] = x;
}
A[2][0] = sin(x);
"""
return CCodePrinter(settings).doprint(expr, assign_to)
|
def ccode(expr, assign_to=None, **settings):
r"""Converts an expr to a string of c code
Parameters
==========
expr : sympy.core.Expr
a sympy expression to be converted
assign_to : optional
When given, the argument is used as the name of the
variable to which the Fortran expression is assigned.
(This is helpful in case of line-wrapping.)
precision : optional
the precision for numbers such as pi [default=15]
user_functions : optional
A dictionary where keys are FunctionClass instances and values
are their string representations. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
cfunction_string)]. See below for examples.
human : optional
If True, the result is a single string that may contain some
constant declarations for the number symbols. If False, the
same information is returned in a more programmer-friendly
data structure.
contract: optional
If True, `Indexed` instances are assumed to obey
tensor contraction rules and the corresponding nested
loops over indices are generated. Setting contract = False
will not generate loops, instead the user is responsible
to provide values for the indices in the code. [default=True]
Examples
========
>>> from sympy import ccode, symbols, Rational, sin, ceiling, Abs
>>> x, tau = symbols(["x", "tau"])
>>> ccode((2*tau)**Rational(7,2))
'8*sqrt(2)*pow(tau, 7.0L/2.0L)'
>>> ccode(sin(x), assign_to="s")
's = sin(x);'
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs"),
... (lambda x: x.is_integer, "ABS")]
... }
>>> ccode(Abs(x) + ceiling(x), user_functions=custom_functions)
'fabs(x) + CEIL(x)'
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> ccode(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
"""
return CCodePrinter(settings).doprint(expr, assign_to)
|
https://github.com/sympy/sympy/issues/6814
|
ufuncify(x, x*log(10)) produces the following exception
---------------------------------------------------------------------------
CodeWrapError Traceback (most recent call last)
<ipython-input-12-f6cc06919001> in <module>()
----> 1 ufuncify(x, x*log(10))
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in ufuncify(args, expr, **kwargs)
479 # first argument accepts an array
480 args[0] = x[i]
--> 481 return autowrap(C.Equality(y[i], f(*args)), **kwargs)
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in autowrap(expr, language, backend, tempdir, args, flags, verbose, helpers)
404 helps.append(Routine(name, expr, args))
405
--> 406 return code_wrapper.wrap_code(routine, helpers=helps)
407
408
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in wrap_code(self, routine, helpers)
133 self._generate_code(routine, helpers)
134 self._prepare_files(routine)
--> 135 self._process_files(routine)
136 mod = __import__(self.module_name)
137 finally:
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in _process_files(self, routine)
158 if retcode:
159 raise CodeWrapError(
--> 160 "Error while executing command: %s" % " ".join(command))
161
162
CodeWrapError: Error while executing command: f2py -m wrapper_module_6 -c wrapped_code_6.f90
If I replace log(2) by its numerical value ufuncify works as expected.
|
CodeWrapError
|
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get("user_functions", {})
self.known_functions.update(userfuncs)
# leading columns depend on fixed or free format
if self._settings["source_format"] == "fixed":
self._lead_code = " "
self._lead_cont = " @ "
self._lead_comment = "C "
elif self._settings["source_format"] == "free":
self._lead_code = ""
self._lead_cont = " "
self._lead_comment = "! "
else:
raise ValueError("Unknown source format: %s" % self._settings["source_format"])
standards = set([66, 77, 90, 95, 2003, 2008])
if self._settings["standard"] not in standards:
raise ValueError("Unknown Fortran standard: %s" % self._settings["standard"])
|
def __init__(self, settings=None):
CodePrinter.__init__(self, settings)
# leading columns depend on fixed or free format
if self._settings["source_format"] == "fixed":
self._lead_code = " "
self._lead_cont = " @ "
self._lead_comment = "C "
elif self._settings["source_format"] == "free":
self._lead_code = ""
self._lead_cont = " "
self._lead_comment = "! "
else:
raise ValueError("Unknown source format: %s" % self._settings["source_format"])
standards = set([66, 77, 90, 95, 2003, 2008])
if self._settings["standard"] not in standards:
raise ValueError("Unknown Fortran standard: %s" % self._settings["standard"])
|
https://github.com/sympy/sympy/issues/6814
|
ufuncify(x, x*log(10)) produces the following exception
---------------------------------------------------------------------------
CodeWrapError Traceback (most recent call last)
<ipython-input-12-f6cc06919001> in <module>()
----> 1 ufuncify(x, x*log(10))
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in ufuncify(args, expr, **kwargs)
479 # first argument accepts an array
480 args[0] = x[i]
--> 481 return autowrap(C.Equality(y[i], f(*args)), **kwargs)
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in autowrap(expr, language, backend, tempdir, args, flags, verbose, helpers)
404 helps.append(Routine(name, expr, args))
405
--> 406 return code_wrapper.wrap_code(routine, helpers=helps)
407
408
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in wrap_code(self, routine, helpers)
133 self._generate_code(routine, helpers)
134 self._prepare_files(routine)
--> 135 self._process_files(routine)
136 mod = __import__(self.module_name)
137 finally:
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in _process_files(self, routine)
158 if retcode:
159 raise CodeWrapError(
--> 160 "Error while executing command: %s" % " ".join(command))
161
162
CodeWrapError: Error while executing command: f2py -m wrapper_module_6 -c wrapped_code_6.f90
If I replace log(2) by its numerical value ufuncify works as expected.
|
CodeWrapError
|
def _print_Function(self, expr):
# All constant function args are evaluated as floats
prec = self._settings["precision"]
args = [N(a, prec) for a in expr.args]
eval_expr = expr.func(*args)
if not isinstance(eval_expr, C.Function):
return self._print(eval_expr)
else:
return CodePrinter._print_Function(self, expr.func(*args))
|
def _print_Function(self, expr):
name = self._settings["user_functions"].get(expr.__class__)
eargs = expr.args
if name is None:
from sympy.functions import conjugate
if expr.func == conjugate:
name = "conjg"
else:
name = expr.func.__name__
if hasattr(expr, "_imp_") and isinstance(expr._imp_, C.Lambda):
# inlined function.
# the expression is printed with _print to avoid loops
return self._print(expr._imp_(*eargs))
if expr.func.__name__ not in self._implicit_functions:
self._not_supported.add(expr)
else:
# convert all args to floats
eargs = map(N, eargs)
return "%s(%s)" % (name, self.stringify(eargs, ", "))
|
https://github.com/sympy/sympy/issues/6814
|
ufuncify(x, x*log(10)) produces the following exception
---------------------------------------------------------------------------
CodeWrapError Traceback (most recent call last)
<ipython-input-12-f6cc06919001> in <module>()
----> 1 ufuncify(x, x*log(10))
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in ufuncify(args, expr, **kwargs)
479 # first argument accepts an array
480 args[0] = x[i]
--> 481 return autowrap(C.Equality(y[i], f(*args)), **kwargs)
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in autowrap(expr, language, backend, tempdir, args, flags, verbose, helpers)
404 helps.append(Routine(name, expr, args))
405
--> 406 return code_wrapper.wrap_code(routine, helpers=helps)
407
408
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in wrap_code(self, routine, helpers)
133 self._generate_code(routine, helpers)
134 self._prepare_files(routine)
--> 135 self._process_files(routine)
136 mod = __import__(self.module_name)
137 finally:
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in _process_files(self, routine)
158 if retcode:
159 raise CodeWrapError(
--> 160 "Error while executing command: %s" % " ".join(command))
161
162
CodeWrapError: Error while executing command: f2py -m wrapper_module_6 -c wrapped_code_6.f90
If I replace log(2) by its numerical value ufuncify works as expected.
|
CodeWrapError
|
def fcode(expr, assign_to=None, **settings):
"""Converts an expr to a string of c code
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=15].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, cfunction_string)]. See below
for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
source_format : optional
The source format can be either 'fixed' or 'free'. [default='fixed']
standard : integer, optional
The Fortran standard to be followed. This is specified as an integer.
Acceptable standards are 66, 77, 90, 95, 2003, and 2008. Default is 77.
Note that currently the only distinction internally is between
standards before 95, and those 95 and after. This may change later as
more features are added.
Examples
========
>>> from sympy import fcode, symbols, Rational, sin, ceiling, floor
>>> x, tau = symbols("x, tau")
>>> fcode((2*tau)**Rational(7, 2))
' 8*sqrt(2.0d0)*tau**(7.0d0/2.0d0)'
>>> fcode(sin(x), assign_to="s")
' s = sin(x)'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
cfunction_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "floor": [(lambda x: not x.is_integer, "FLOOR1"),
... (lambda x: x.is_integer, "FLOOR2")]
... }
>>> fcode(floor(x) + ceiling(x), user_functions=custom_functions)
' CEIL(x) + FLOOR1(x)'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(fcode(expr, tau))
if (x > 0) then
tau = x + 1
else
tau = x
end if
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> fcode(e.rhs, assign_to=e.lhs, contract=False)
' Dy(i) = (y(i + 1) - y(i))/(t(i + 1) - t(i))'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(fcode(mat, A))
A(1, 1) = x**2
if (x > 0) then
A(2, 1) = x + 1
else
A(2, 1) = x
end if
A(3, 1) = sin(x)
"""
return FCodePrinter(settings).doprint(expr, assign_to)
|
def fcode(expr, assign_to=None, **settings):
"""Converts an expr to a string of Fortran 77 code
Parameters
==========
expr : sympy.core.Expr
a sympy expression to be converted
assign_to : optional
When given, the argument is used as the name of the
variable to which the Fortran expression is assigned.
(This is helpful in case of line-wrapping.)
precision : optional
the precision for numbers such as pi [default=15]
user_functions : optional
A dictionary where keys are FunctionClass instances and values
are there string representations.
human : optional
If True, the result is a single string that may contain some
parameter statements for the number symbols. If False, the same
information is returned in a more programmer-friendly data
structure.
source_format : optional
The source format can be either 'fixed' or 'free'.
[default='fixed']
standard : optional
The Fortran standard to be followed. This is specified as an integer.
Acceptable standards are 66, 77, 90, 95, 2003, and 2008. Default is
77. Note that currently the only distinction internally is between
standards before 95, and those 95 and after. This may change later
as more features are added.
contract: optional
If True, `Indexed` instances are assumed to obey
tensor contraction rules and the corresponding nested
loops over indices are generated. Setting contract = False
will not generate loops, instead the user is responsible
to provide values for the indices in the code. [default=True]
Examples
========
>>> from sympy import fcode, symbols, Rational, pi, sin
>>> x, tau = symbols('x,tau')
>>> fcode((2*tau)**Rational(7,2))
' 8*sqrt(2.0d0)*tau**(7.0d0/2.0d0)'
>>> fcode(sin(x), assign_to="s")
' s = sin(x)'
>>> print(fcode(pi))
parameter (pi = 3.14159265358979d0)
pi
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> fcode(e.rhs, assign_to=e.lhs, contract=False)
' Dy(i) = (y(i + 1) - y(i))/(t(i + 1) - t(i))'
"""
# run the printer
return FCodePrinter(settings).doprint(expr, assign_to)
|
https://github.com/sympy/sympy/issues/6814
|
ufuncify(x, x*log(10)) produces the following exception
---------------------------------------------------------------------------
CodeWrapError Traceback (most recent call last)
<ipython-input-12-f6cc06919001> in <module>()
----> 1 ufuncify(x, x*log(10))
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in ufuncify(args, expr, **kwargs)
479 # first argument accepts an array
480 args[0] = x[i]
--> 481 return autowrap(C.Equality(y[i], f(*args)), **kwargs)
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in autowrap(expr, language, backend, tempdir, args, flags, verbose, helpers)
404 helps.append(Routine(name, expr, args))
405
--> 406 return code_wrapper.wrap_code(routine, helpers=helps)
407
408
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in wrap_code(self, routine, helpers)
133 self._generate_code(routine, helpers)
134 self._prepare_files(routine)
--> 135 self._process_files(routine)
136 mod = __import__(self.module_name)
137 finally:
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in _process_files(self, routine)
158 if retcode:
159 raise CodeWrapError(
--> 160 "Error while executing command: %s" % " ".join(command))
161
162
CodeWrapError: Error while executing command: f2py -m wrapper_module_6 -c wrapped_code_6.f90
If I replace log(2) by its numerical value ufuncify works as expected.
|
CodeWrapError
|
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get("user_functions", {})
self.known_functions.update(userfuncs)
|
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get("user_functions", {})
for k, v in userfuncs.items():
if not isinstance(v, tuple):
userfuncs[k] = (lambda *x: True, v)
self.known_functions.update(userfuncs)
|
https://github.com/sympy/sympy/issues/6814
|
ufuncify(x, x*log(10)) produces the following exception
---------------------------------------------------------------------------
CodeWrapError Traceback (most recent call last)
<ipython-input-12-f6cc06919001> in <module>()
----> 1 ufuncify(x, x*log(10))
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in ufuncify(args, expr, **kwargs)
479 # first argument accepts an array
480 args[0] = x[i]
--> 481 return autowrap(C.Equality(y[i], f(*args)), **kwargs)
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in autowrap(expr, language, backend, tempdir, args, flags, verbose, helpers)
404 helps.append(Routine(name, expr, args))
405
--> 406 return code_wrapper.wrap_code(routine, helpers=helps)
407
408
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in wrap_code(self, routine, helpers)
133 self._generate_code(routine, helpers)
134 self._prepare_files(routine)
--> 135 self._process_files(routine)
136 mod = __import__(self.module_name)
137 finally:
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in _process_files(self, routine)
158 if retcode:
159 raise CodeWrapError(
--> 160 "Error while executing command: %s" % " ".join(command))
161
162
CodeWrapError: Error while executing command: f2py -m wrapper_module_6 -c wrapped_code_6.f90
If I replace log(2) by its numerical value ufuncify works as expected.
|
CodeWrapError
|
def _print_Indexed(self, expr):
# calculate index for 1d array
dims = expr.shape
elem = S.Zero
offset = S.One
for i in reversed(range(expr.rank)):
elem += expr.indices[i] * offset
offset *= dims[i]
return "%s[%s]" % (self._print(expr.base.label), self._print(elem))
|
def _print_Indexed(self, expr):
# calculate index for 1d array
dims = expr.shape
inds = [i.label for i in expr.indices]
elem = S.Zero
offset = S.One
for i in reversed(range(expr.rank)):
elem += offset * inds[i]
offset *= dims[i]
return "%s[%s]" % (self._print(expr.base.label), self._print(elem))
|
https://github.com/sympy/sympy/issues/6814
|
ufuncify(x, x*log(10)) produces the following exception
---------------------------------------------------------------------------
CodeWrapError Traceback (most recent call last)
<ipython-input-12-f6cc06919001> in <module>()
----> 1 ufuncify(x, x*log(10))
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in ufuncify(args, expr, **kwargs)
479 # first argument accepts an array
480 args[0] = x[i]
--> 481 return autowrap(C.Equality(y[i], f(*args)), **kwargs)
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in autowrap(expr, language, backend, tempdir, args, flags, verbose, helpers)
404 helps.append(Routine(name, expr, args))
405
--> 406 return code_wrapper.wrap_code(routine, helpers=helps)
407
408
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in wrap_code(self, routine, helpers)
133 self._generate_code(routine, helpers)
134 self._prepare_files(routine)
--> 135 self._process_files(routine)
136 mod = __import__(self.module_name)
137 finally:
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in _process_files(self, routine)
158 if retcode:
159 raise CodeWrapError(
--> 160 "Error while executing command: %s" % " ".join(command))
161
162
CodeWrapError: Error while executing command: f2py -m wrapper_module_6 -c wrapped_code_6.f90
If I replace log(2) by its numerical value ufuncify works as expected.
|
CodeWrapError
|
def jscode(expr, assign_to=None, **settings):
"""Converts an expr to a string of javascript code
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=15].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, js_function_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import jscode, symbols, Rational, sin, ceiling, Abs
>>> x, tau = symbols("x, tau")
>>> jscode((2*tau)**Rational(7, 2))
'8*Math.sqrt(2)*Math.pow(tau, 7/2)'
>>> jscode(sin(x), assign_to="s")
's = Math.sin(x);'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
js_function_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs"),
... (lambda x: x.is_integer, "ABS")]
... }
>>> jscode(Abs(x) + ceiling(x), user_functions=custom_functions)
'fabs(x) + CEIL(x)'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(jscode(expr, tau))
if (x > 0) {
tau = x + 1;
}
else {
tau = x;
}
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> jscode(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(jscode(mat, A))
A[0][0] = Math.pow(x, 2);
if (x > 0) {
A[1][0] = x + 1;
}
else {
A[1][0] = x;
}
A[2][0] = Math.sin(x);
"""
return JavascriptCodePrinter(settings).doprint(expr, assign_to)
|
def jscode(expr, assign_to=None, **settings):
"""Converts an expr to a string of javascript code
Parameters
==========
expr : sympy.core.Expr
a sympy expression to be converted
assign_to : optional
When given, the argument is used as the name of the
variable to which the Fortran expression is assigned.
(This is helpful in case of line-wrapping.)
precision : optional
the precision for numbers such as pi [default=15]
user_functions : optional
A dictionary where keys are FunctionClass instances and values
are their string representations. Alternatively the
dictionary values can be a list of tuples i.e. [(argument_test,
jsfunction_string)].
human : optional
If True, the result is a single string that may contain some
constant declarations for the number symbols. If False, the
same information is returned in a more programmer-friendly
data structure.
Examples
========
>>> from sympy import jscode, symbols, Rational, sin
>>> x, tau = symbols(["x", "tau"])
>>> jscode((2*tau)**Rational(7,2))
'8*Math.sqrt(2)*Math.pow(tau, 7/2)'
>>> jscode(sin(x), assign_to="s")
's = Math.sin(x);'
"""
return JavascriptCodePrinter(settings).doprint(expr, assign_to)
|
https://github.com/sympy/sympy/issues/6814
|
ufuncify(x, x*log(10)) produces the following exception
---------------------------------------------------------------------------
CodeWrapError Traceback (most recent call last)
<ipython-input-12-f6cc06919001> in <module>()
----> 1 ufuncify(x, x*log(10))
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in ufuncify(args, expr, **kwargs)
479 # first argument accepts an array
480 args[0] = x[i]
--> 481 return autowrap(C.Equality(y[i], f(*args)), **kwargs)
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in autowrap(expr, language, backend, tempdir, args, flags, verbose, helpers)
404 helps.append(Routine(name, expr, args))
405
--> 406 return code_wrapper.wrap_code(routine, helpers=helps)
407
408
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in wrap_code(self, routine, helpers)
133 self._generate_code(routine, helpers)
134 self._prepare_files(routine)
--> 135 self._process_files(routine)
136 mod = __import__(self.module_name)
137 finally:
/home/thomas/gitrepos/sympy/sympy/utilities/autowrap.py in _process_files(self, routine)
158 if retcode:
159 raise CodeWrapError(
--> 160 "Error while executing command: %s" % " ".join(command))
161
162
CodeWrapError: Error while executing command: f2py -m wrapper_module_6 -c wrapped_code_6.f90
If I replace log(2) by its numerical value ufuncify works as expected.
|
CodeWrapError
|
def load_ipython_extension(ip):
"""Load the extension in IPython."""
init_printing(ip=ip)
|
def load_ipython_extension(ip):
"""Load the extension in IPython."""
import IPython
global _loaded
# Use extension manager to track loaded status if available
# This is currently in IPython 0.14.dev
if hasattr(ip.extension_manager, "loaded"):
loaded = "sympy.interactive.ipythonprinting" not in ip.extension_manager.loaded
else:
loaded = _loaded
if not loaded:
init_printing(ip=ip)
_loaded = True
|
https://github.com/sympy/sympy/issues/3619
|
Traceback (most recent call last):
File "./setup.py", line 34, in ?
import sympy
File "/tmp/sympy/sympy/**init**.py", line 29, in ?
from printing import pretty, pretty_print, pprint, pprint_use_unicode, \
File "/tmp/sympy/sympy/printing/**init**.py", line 8, in ?
from preview import preview, view, pngview, pdfview, dviview
File "/tmp/sympy/sympy/printing/preview.py", line 4, in ?
import pexpect
ImportError: No module named pexpect
|
ImportError
|
def forward(self, x1, x2, diag=False, **params):
batch_shape = x1.shape[:-2]
n_batch_dims = len(batch_shape)
n1, d = x1.shape[-2:]
n2 = x2.shape[-2]
K = torch.zeros(
*batch_shape, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype
)
if not diag:
# Scale the inputs by the lengthscale (for stability)
x1_ = x1.div(self.lengthscale)
x2_ = x2.div(self.lengthscale)
# Form all possible rank-1 products for the gradient and Hessian blocks
outer = x1_.view(*batch_shape, n1, 1, d) - x2_.view(*batch_shape, 1, n2, d)
outer = outer / self.lengthscale.unsqueeze(-2)
outer = torch.transpose(outer, -1, -2).contiguous()
# 1) Kernel block
diff = self.covar_dist(
x1_, x2_, square_dist=True, dist_postprocess_func=postprocess_rbf, **params
)
K_11 = diff
K[..., :n1, :n2] = K_11
# 2) First gradient block
outer1 = outer.view(*batch_shape, n1, n2 * d)
K[..., :n1, n2:] = outer1 * K_11.repeat([*([1] * (n_batch_dims + 1)), d])
# 3) Second gradient block
outer2 = outer.transpose(-1, -3).reshape(*batch_shape, n2, n1 * d)
outer2 = outer2.transpose(-1, -2)
K[..., n1:, :n2] = -outer2 * K_11.repeat([*([1] * n_batch_dims), d, 1])
# 4) Hessian block
outer3 = outer1.repeat([*([1] * n_batch_dims), d, 1]) * outer2.repeat(
[*([1] * (n_batch_dims + 1)), d]
)
kp = KroneckerProductLazyTensor(
torch.eye(d, d, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1)
/ self.lengthscale.pow(2),
torch.ones(n1, n2, device=x1.device, dtype=x1.dtype).repeat(
*batch_shape, 1, 1
),
)
chain_rule = kp.evaluate() - outer3
K[..., n1:, n2:] = chain_rule * K_11.repeat([*([1] * n_batch_dims), d, d])
# Symmetrize for stability
if n1 == n2 and torch.eq(x1, x2).all():
K = 0.5 * (K.transpose(-1, -2) + K)
# Apply a perfect shuffle permutation to match the MutiTask ordering
pi1 = torch.arange(n1 * (d + 1)).view(d + 1, n1).t().reshape((n1 * (d + 1)))
pi2 = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))
K = K[..., pi1, :][..., :, pi2]
return K
else:
if not (n1 == n2 and torch.eq(x1, x2).all()):
raise RuntimeError("diag=True only works when x1 == x2")
kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)
grad_diag = torch.ones(
*batch_shape, n2, d, device=x1.device, dtype=x1.dtype
) / self.lengthscale.pow(2)
grad_diag = grad_diag.transpose(-1, -2).reshape(*batch_shape, n2 * d)
k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)
pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))
return k_diag[..., pi]
|
def forward(self, x1, x2, diag=False, **params):
batch_shape = x1.shape[:-2]
n_batch_dims = len(batch_shape)
n1, d = x1.shape[-2:]
n2 = x2.shape[-2]
K = torch.zeros(
*batch_shape, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype
)
if not diag:
# Scale the inputs by the lengthscale (for stability)
x1_ = x1.div(self.lengthscale)
x2_ = x2.div(self.lengthscale)
# Form all possible rank-1 products for the gradient and Hessian blocks
outer = x1_.view(*batch_shape, n1, 1, d) - x2_.view(*batch_shape, 1, n2, d)
outer = outer / self.lengthscale.unsqueeze(-2)
outer = torch.transpose(outer, -1, -2).contiguous()
# 1) Kernel block
diff = self.covar_dist(
x1_, x2_, square_dist=True, dist_postprocess_func=postprocess_rbf, **params
)
K_11 = diff
K[..., :n1, :n2] = K_11
# 2) First gradient block
outer1 = outer.view(*batch_shape, n1, n2 * d)
K[..., :n1, n2:] = outer1 * K_11.repeat([*([1] * (n_batch_dims + 1)), d])
# 3) Second gradient block
outer2 = outer.transpose(-1, -3).reshape(*batch_shape, n2, n1 * d)
outer2 = outer2.transpose(-1, -2)
K[..., n1:, :n2] = -outer2 * K_11.repeat([*([1] * n_batch_dims), d, 1])
# 4) Hessian block
outer3 = outer1.repeat([*([1] * n_batch_dims), d, 1]) * outer2.repeat(
[*([1] * (n_batch_dims + 1)), d]
)
kp = KroneckerProductLazyTensor(
torch.eye(d, d, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1)
/ self.lengthscale.pow(2),
torch.ones(n1, n2, device=x1.device, dtype=x1.dtype).repeat(
*batch_shape, 1, 1
),
)
chain_rule = kp.evaluate() - outer3
K[..., n1:, n2:] = chain_rule * K_11.repeat([*([1] * n_batch_dims), d, d])
# Symmetrize for stability
if n1 == n2 and torch.eq(x1, x2).all():
K = 0.5 * (K.transpose(-1, -2) + K)
# Apply a perfect shuffle permutation to match the MutiTask ordering
pi1 = torch.arange(n1 * (d + 1)).view(d + 1, n1).t().reshape((n1 * (d + 1)))
pi2 = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))
K = K[..., pi1, :][..., :, pi2]
return K
else:
if not (n1 == n2 and torch.eq(x1, x2).all()):
raise RuntimeError("diag=True only works when x1 == x2")
kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)
grad_diag = torch.ones(
*batch_shape, n2, d, device=x1.device, dtype=x1.dtype
) / self.lengthscale.pow_(2)
grad_diag = grad_diag.transpose(-1, -2).reshape(*batch_shape, n2 * d)
k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)
pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))
return k_diag[..., pi]
|
https://github.com/cornellius-gp/gpytorch/issues/1389
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-17-ecb6525006fe> in <module>
1 z = k(train_x, train_x, diag=True)
----> 2 z[0].backward()
3 print(k.raw_lengthscale.grad)
/mnt/xarfuse/uid-66331/be3771ae-seed-nspid4026531836-ns-4026531840/torch/tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
231 create_graph=create_graph,
232 inputs=inputs)
--> 233 torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
234
235 def register_hook(self, hook):
/mnt/xarfuse/uid-66331/be3771ae-seed-nspid4026531836-ns-4026531840/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
144 Variable._execution_engine.run_backward(
145 tensors, grad_tensors_, retain_graph, create_graph, inputs,
--> 146 allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag
147
148
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [1, 1]], which is output 0 of SoftplusBackward, is at version 1; expected version 0 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).
|
RuntimeError
|
def __init__(
self,
lower_bound,
upper_bound,
transform=sigmoid,
inv_transform=inv_sigmoid,
initial_value=None,
):
"""
Defines an interval constraint for GP model parameters, specified by a lower bound and upper bound. For usage
details, see the documentation for :meth:`~gpytorch.module.Module.register_constraint`.
Args:
lower_bound (float or torch.Tensor): The lower bound on the parameter.
upper_bound (float or torch.Tensor): The upper bound on the parameter.
"""
lower_bound = torch.as_tensor(lower_bound).float()
upper_bound = torch.as_tensor(upper_bound).float()
if torch.any(torch.ge(lower_bound, upper_bound)):
raise RuntimeError("Got parameter bounds with empty intervals.")
super().__init__()
self.register_buffer("lower_bound", lower_bound)
self.register_buffer("upper_bound", upper_bound)
self._transform = transform
self._inv_transform = inv_transform
self._initial_value = initial_value
if transform is not None and inv_transform is None:
self._inv_transform = _get_inv_param_transform(transform)
|
def __init__(
self,
lower_bound,
upper_bound,
transform=sigmoid,
inv_transform=inv_sigmoid,
initial_value=None,
):
"""
Defines an interval constraint for GP model parameters, specified by a lower bound and upper bound. For usage
details, see the documentation for :meth:`~gpytorch.module.Module.register_constraint`.
Args:
lower_bound (float or torch.Tensor): The lower bound on the parameter.
upper_bound (float or torch.Tensor): The upper bound on the parameter.
"""
lower_bound = torch.as_tensor(lower_bound)
upper_bound = torch.as_tensor(upper_bound)
if torch.any(torch.ge(lower_bound, upper_bound)):
raise RuntimeError("Got parameter bounds with empty intervals.")
super().__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self._transform = transform
self._inv_transform = inv_transform
self._initial_value = initial_value
if transform is not None and inv_transform is None:
self._inv_transform = _get_inv_param_transform(transform)
|
https://github.com/cornellius-gp/gpytorch/issues/1305
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-1-068518c8dae8> in <module>
33 mll = ExactMarginalLogLikelihood(likelihood, model)
34
---> 35 loss = mll(model(X), y)
~/github/gpytorch/gpytorch/module.py in __call__(self, *inputs, **kwargs)
26
27 def __call__(self, *inputs, **kwargs):
---> 28 outputs = self.forward(*inputs, **kwargs)
29 if isinstance(outputs, list):
30 return [_validate_module_outputs(output) for output in outputs]
~/github/gpytorch/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, function_dist, target, *params)
49 # Get the log prob of the marginal distribution
50 output = self.likelihood(function_dist, *params)
---> 51 res = output.log_prob(target)
52
53 # Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models)
~/github/gpytorch/gpytorch/distributions/multivariate_normal.py in log_prob(self, value)
133
134 # Get log determininat and first part of quadratic form
--> 135 inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
136
137 res = -0.5 * sum([inv_quad, logdet, diff.size(-1) * math.log(2 * math.pi)])
~/github/gpytorch/gpytorch/lazy/lazy_tensor.py in inv_quad_logdet(self, inv_quad_rhs, logdet, reduce_inv_quad)
1000 from .chol_lazy_tensor import CholLazyTensor
1001
-> 1002 cholesky = CholLazyTensor(self.cholesky())
1003 return cholesky.inv_quad_logdet(inv_quad_rhs=inv_quad_rhs, logdet=logdet, reduce_inv_quad=reduce_inv_quad)
1004
~/github/gpytorch/gpytorch/lazy/lazy_tensor.py in cholesky(self, upper)
737 (LazyTensor) Cholesky factor (lower triangular)
738 """
--> 739 res = self._cholesky()
740 if upper:
741 res = res.transpose(-1, -2)
~/github/gpytorch/gpytorch/utils/memoize.py in g(self, *args, **kwargs)
32 cache_name = name if name is not None else method
33 if not is_in_cache(self, cache_name):
---> 34 add_to_cache(self, cache_name, method(self, *args, **kwargs))
35 return get_from_cache(self, cache_name)
36
~/github/gpytorch/gpytorch/lazy/lazy_tensor.py in _cholesky(self)
400 from .keops_lazy_tensor import KeOpsLazyTensor
401
--> 402 evaluated_kern_mat = self.evaluate_kernel()
403
404 if any(isinstance(sub_mat, KeOpsLazyTensor) for sub_mat in evaluated_kern_mat._args):
~/github/gpytorch/gpytorch/lazy/lazy_tensor.py in evaluate_kernel(self)
883 all lazily evaluated kernels actually evaluated.
884 """
--> 885 return self.representation_tree()(*self.representation())
886
887 def inv_matmul(self, right_tensor, left_tensor=None):
~/github/gpytorch/gpytorch/lazy/lazy_tensor.py in representation_tree(self)
1273 including all subobjects. This is used internally.
1274 """
-> 1275 return LazyTensorRepresentationTree(self)
1276
1277 @property
~/github/gpytorch/gpytorch/lazy/lazy_tensor_representation_tree.py in __init__(self, lazy_tsr)
11 for arg in lazy_tsr._args:
12 if hasattr(arg, "representation") and callable(arg.representation): # Is it a lazy tensor?
---> 13 representation_size = len(arg.representation())
14 self.children.append((slice(counter, counter + representation_size, None), arg.representation_tree()))
15 counter += representation_size
~/github/gpytorch/gpytorch/lazy/lazy_evaluated_kernel_tensor.py in representation(self)
311 # representation
312 else:
--> 313 return self.evaluate_kernel().representation()
314
315 def representation_tree(self):
~/github/gpytorch/gpytorch/utils/memoize.py in g(self, *args, **kwargs)
32 cache_name = name if name is not None else method
33 if not is_in_cache(self, cache_name):
---> 34 add_to_cache(self, cache_name, method(self, *args, **kwargs))
35 return get_from_cache(self, cache_name)
36
~/github/gpytorch/gpytorch/lazy/lazy_evaluated_kernel_tensor.py in evaluate_kernel(self)
278 temp_active_dims = self.kernel.active_dims
279 self.kernel.active_dims = None
--> 280 res = self.kernel(x1, x2, diag=False, last_dim_is_batch=self.last_dim_is_batch, **self.params)
281 self.kernel.active_dims = temp_active_dims
282
~/github/gpytorch/gpytorch/kernels/kernel.py in __call__(self, x1, x2, diag, last_dim_is_batch, **params)
394 res = LazyEvaluatedKernelTensor(x1_, x2_, kernel=self, last_dim_is_batch=last_dim_is_batch, **params)
395 else:
--> 396 res = lazify(super(Kernel, self).__call__(x1_, x2_, last_dim_is_batch=last_dim_is_batch, **params))
397 return res
398
~/github/gpytorch/gpytorch/module.py in __call__(self, *inputs, **kwargs)
26
27 def __call__(self, *inputs, **kwargs):
---> 28 outputs = self.forward(*inputs, **kwargs)
29 if isinstance(outputs, list):
30 return [_validate_module_outputs(output) for output in outputs]
~/github/gpytorch/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2, diag, **params)
87 x1,
88 x2,
---> 89 self.lengthscale,
90 lambda x1, x2: self.covar_dist(
91 x1, x2, square_dist=True, diag=False, dist_postprocess_func=postprocess_rbf, postprocess=False, **params
~/github/gpytorch/gpytorch/kernels/kernel.py in lengthscale(self)
237 def lengthscale(self):
238 if self.has_lengthscale:
--> 239 return self.raw_lengthscale_constraint.transform(self.raw_lengthscale)
240 else:
241 return None
~/github/gpytorch/gpytorch/constraints/constraints.py in transform(self, tensor)
92 min_bound = torch.min(self.lower_bound)
93
---> 94 if max_bound == math.inf or min_bound == -math.inf:
95 raise RuntimeError(
96 "Cannot make an Interval directly with non-finite bounds. Use a derived class like "
~/anaconda3/lib/python3.7/site-packages/torch/tensor.py in wrapped(*args, **kwargs)
26 def wrapped(*args, **kwargs):
27 try:
---> 28 return f(*args, **kwargs)
29 except TypeError:
30 return NotImplemented
RuntimeError: value cannot be converted to type int64_t without overflow: inf
|
RuntimeError
|
def _preconditioner(self):
r"""
Here we use a partial pivoted Cholesky preconditioner:
K \approx L L^T + D
where L L^T is a low rank approximation, and D is a diagonal.
We can compute the preconditioner's inverse using Woodbury
(L L^T + D)^{-1} = D^{-1} - D^{-1} L (I + L D^{-1} L^T)^{-1} L^T D^{-1}
This function returns:
- A function `precondition_closure` that computes the solve (L L^T + D)^{-1} x
- A LazyTensor `precondition_lt` that represents (L L^T + D)
- The log determinant of (L L^T + D)
"""
if self.preconditioner_override is not None:
return self.preconditioner_override(self)
if (
settings.max_preconditioner_size.value() == 0
or self.size(-1) < settings.min_preconditioning_size.value()
):
return None, None, None
# Cache a QR decomposition [Q; Q'] R = [D^{-1/2}; L]
# This makes it fast to compute solves and log determinants with it
#
# Through woodbury, (L L^T + D)^{-1} reduces down to (D^{-1} - D^{-1/2} Q Q^T D^{-1/2})
# Through matrix determinant lemma, log |L L^T + D| reduces down to 2 log |R|
if self._q_cache is None:
max_iter = settings.max_preconditioner_size.value()
self._piv_chol_self = pivoted_cholesky.pivoted_cholesky(
self._lazy_tensor, max_iter
)
if torch.any(torch.isnan(self._piv_chol_self)).item():
warnings.warn(
"NaNs encountered in preconditioner computation. Attempting to continue without preconditioning.",
NumericalWarning,
)
return None, None, None
self._init_cache()
# NOTE: We cannot memoize this precondition closure as it causes a memory leak
def precondition_closure(tensor):
# This makes it fast to compute solves with it
qqt = self._q_cache.matmul(self._q_cache.transpose(-2, -1).matmul(tensor))
if self._constant_diag:
return (1 / self._noise) * (tensor - qqt)
return (tensor / self._noise) - qqt
return (precondition_closure, self._precond_lt, self._precond_logdet_cache)
|
def _preconditioner(self):
if self.preconditioner_override is not None:
return self.preconditioner_override(self)
if (
settings.max_preconditioner_size.value() == 0
or self.size(-1) < settings.min_preconditioning_size.value()
):
return None, None, None
if self._q_cache is None:
max_iter = settings.max_preconditioner_size.value()
self._piv_chol_self = pivoted_cholesky.pivoted_cholesky(
self._lazy_tensor, max_iter
)
if torch.any(torch.isnan(self._piv_chol_self)).item():
warnings.warn(
"NaNs encountered in preconditioner computation. Attempting to continue without preconditioning.",
NumericalWarning,
)
return None, None, None
self._init_cache()
# NOTE: We cannot memoize this precondition closure as it causes a memory leak
def precondition_closure(tensor):
qqt = self._q_cache.matmul(self._q_cache.transpose(-2, -1).matmul(tensor))
if self._constant_diag:
return (1 / self._noise) * (tensor - qqt)
return (tensor / self._noise) - qqt
return (precondition_closure, self._precond_lt, self._precond_logdet_cache)
|
https://github.com/cornellius-gp/gpytorch/issues/1298
|
RuntimeError Traceback (most recent call last)
<ipython-input-4-9e151e2de37a> in <module>
24 with gpytorch.settings.max_cholesky_size(100), gpytorch.settings.min_preconditioning_size(100):
25 train_dist = gp(train_x)
---> 26 loss = -mll(train_dist, train_y).sum()
~/Code/gpytorch/gpytorch/module.py in __call__(self, *inputs, **kwargs)
26
27 def __call__(self, *inputs, **kwargs):
---> 28 outputs = self.forward(*inputs, **kwargs)
29 if isinstance(outputs, list):
30 return [_validate_module_outputs(output) for output in outputs]
~/Code/gpytorch/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, function_dist, target, *params)
49 # Get the log prob of the marginal distribution
50 output = self.likelihood(function_dist, *params)
---> 51 res = output.log_prob(target)
52
53 # Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models)
~/Code/gpytorch/gpytorch/distributions/multivariate_normal.py in log_prob(self, value)
138
139 # Get log determininat and first part of quadratic form
--> 140 inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
141
142 res = -0.5 * sum([inv_quad, logdet, diff.size(-1) * math.log(2 * math.pi)])
~/Code/gpytorch/gpytorch/lazy/lazy_tensor.py in inv_quad_logdet(self, inv_quad_rhs, logdet, reduce_inv_quad)
1069 probe_vectors,
1070 probe_vector_norms,
-> 1071 *args,
1072 )
1073
~/Code/gpytorch/gpytorch/functions/_inv_quad_log_det.py in forward(ctx, representation_tree, dtype, device, matrix_shape, batch_shape, inv_quad, logdet, probe_vectors, probe_vector_norms, *args)
65 lazy_tsr = ctx.representation_tree(*matrix_args)
66 with torch.no_grad():
---> 67 preconditioner, precond_lt, logdet_correction = lazy_tsr._preconditioner()
68
69 ctx.preconditioner = preconditioner
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _preconditioner(self)
84 )
85 return None, None, None
---> 86 self._init_cache()
87
88 # NOTE: We cannot memoize this precondition closure as it causes a memory leak
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache(self)
107 self._init_cache_for_constant_diag(eye, batch_shape, n, k)
108 else:
--> 109 self._init_cache_for_non_constant_diag(eye, batch_shape, n)
110
111 self._precond_lt = PsdSumLazyTensor(RootLazyTensor(self._piv_chol_self), self._diag_tensor)
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache_for_non_constant_diag(self, eye, batch_shape, n)
125 # With non-constant diagonals, we cant factor out the noise as easily
126 # eye = eye.expand(*batch_shape, -1, -1)
--> 127 self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye)))
128 self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()
129
RuntimeError: Tensors must have same number of dimensions: got 3 and 2
|
RuntimeError
|
def precondition_closure(tensor):
# This makes it fast to compute solves with it
qqt = self._q_cache.matmul(self._q_cache.transpose(-2, -1).matmul(tensor))
if self._constant_diag:
return (1 / self._noise) * (tensor - qqt)
return (tensor / self._noise) - qqt
|
def precondition_closure(tensor):
qqt = self._q_cache.matmul(self._q_cache.transpose(-2, -1).matmul(tensor))
if self._constant_diag:
return (1 / self._noise) * (tensor - qqt)
return (tensor / self._noise) - qqt
|
https://github.com/cornellius-gp/gpytorch/issues/1298
|
RuntimeError Traceback (most recent call last)
<ipython-input-4-9e151e2de37a> in <module>
24 with gpytorch.settings.max_cholesky_size(100), gpytorch.settings.min_preconditioning_size(100):
25 train_dist = gp(train_x)
---> 26 loss = -mll(train_dist, train_y).sum()
~/Code/gpytorch/gpytorch/module.py in __call__(self, *inputs, **kwargs)
26
27 def __call__(self, *inputs, **kwargs):
---> 28 outputs = self.forward(*inputs, **kwargs)
29 if isinstance(outputs, list):
30 return [_validate_module_outputs(output) for output in outputs]
~/Code/gpytorch/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, function_dist, target, *params)
49 # Get the log prob of the marginal distribution
50 output = self.likelihood(function_dist, *params)
---> 51 res = output.log_prob(target)
52
53 # Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models)
~/Code/gpytorch/gpytorch/distributions/multivariate_normal.py in log_prob(self, value)
138
139 # Get log determininat and first part of quadratic form
--> 140 inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
141
142 res = -0.5 * sum([inv_quad, logdet, diff.size(-1) * math.log(2 * math.pi)])
~/Code/gpytorch/gpytorch/lazy/lazy_tensor.py in inv_quad_logdet(self, inv_quad_rhs, logdet, reduce_inv_quad)
1069 probe_vectors,
1070 probe_vector_norms,
-> 1071 *args,
1072 )
1073
~/Code/gpytorch/gpytorch/functions/_inv_quad_log_det.py in forward(ctx, representation_tree, dtype, device, matrix_shape, batch_shape, inv_quad, logdet, probe_vectors, probe_vector_norms, *args)
65 lazy_tsr = ctx.representation_tree(*matrix_args)
66 with torch.no_grad():
---> 67 preconditioner, precond_lt, logdet_correction = lazy_tsr._preconditioner()
68
69 ctx.preconditioner = preconditioner
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _preconditioner(self)
84 )
85 return None, None, None
---> 86 self._init_cache()
87
88 # NOTE: We cannot memoize this precondition closure as it causes a memory leak
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache(self)
107 self._init_cache_for_constant_diag(eye, batch_shape, n, k)
108 else:
--> 109 self._init_cache_for_non_constant_diag(eye, batch_shape, n)
110
111 self._precond_lt = PsdSumLazyTensor(RootLazyTensor(self._piv_chol_self), self._diag_tensor)
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache_for_non_constant_diag(self, eye, batch_shape, n)
125 # With non-constant diagonals, we cant factor out the noise as easily
126 # eye = eye.expand(*batch_shape, -1, -1)
--> 127 self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye)))
128 self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()
129
RuntimeError: Tensors must have same number of dimensions: got 3 and 2
|
RuntimeError
|
def _init_cache(self):
*batch_shape, n, k = self._piv_chol_self.shape
self._noise = self._diag_tensor.diag().unsqueeze(-1)
# the check for constant diag needs to be done carefully for batches.
noise_first_element = self._noise[..., :1, :]
self._constant_diag = torch.equal(
self._noise, noise_first_element * torch.ones_like(self._noise)
)
eye = torch.eye(
k, dtype=self._piv_chol_self.dtype, device=self._piv_chol_self.device
)
eye = eye.expand(*batch_shape, k, k)
if self._constant_diag:
self._init_cache_for_constant_diag(eye, batch_shape, n, k)
else:
self._init_cache_for_non_constant_diag(eye, batch_shape, n)
self._precond_lt = PsdSumLazyTensor(
RootLazyTensor(self._piv_chol_self), self._diag_tensor
)
|
def _init_cache(self):
*batch_shape, n, k = self._piv_chol_self.shape
self._noise = self._diag_tensor.diag().unsqueeze(-1)
# the check for constant diag needs to be done carefully for batches.
noise_first_element = self._noise[..., :1, :]
self._constant_diag = torch.equal(
self._noise, noise_first_element * torch.ones_like(self._noise)
)
eye = torch.eye(
k, dtype=self._piv_chol_self.dtype, device=self._piv_chol_self.device
)
if self._constant_diag:
self._init_cache_for_constant_diag(eye, batch_shape, n, k)
else:
self._init_cache_for_non_constant_diag(eye, batch_shape, n)
self._precond_lt = PsdSumLazyTensor(
RootLazyTensor(self._piv_chol_self), self._diag_tensor
)
|
https://github.com/cornellius-gp/gpytorch/issues/1298
|
RuntimeError Traceback (most recent call last)
<ipython-input-4-9e151e2de37a> in <module>
24 with gpytorch.settings.max_cholesky_size(100), gpytorch.settings.min_preconditioning_size(100):
25 train_dist = gp(train_x)
---> 26 loss = -mll(train_dist, train_y).sum()
~/Code/gpytorch/gpytorch/module.py in __call__(self, *inputs, **kwargs)
26
27 def __call__(self, *inputs, **kwargs):
---> 28 outputs = self.forward(*inputs, **kwargs)
29 if isinstance(outputs, list):
30 return [_validate_module_outputs(output) for output in outputs]
~/Code/gpytorch/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, function_dist, target, *params)
49 # Get the log prob of the marginal distribution
50 output = self.likelihood(function_dist, *params)
---> 51 res = output.log_prob(target)
52
53 # Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models)
~/Code/gpytorch/gpytorch/distributions/multivariate_normal.py in log_prob(self, value)
138
139 # Get log determininat and first part of quadratic form
--> 140 inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
141
142 res = -0.5 * sum([inv_quad, logdet, diff.size(-1) * math.log(2 * math.pi)])
~/Code/gpytorch/gpytorch/lazy/lazy_tensor.py in inv_quad_logdet(self, inv_quad_rhs, logdet, reduce_inv_quad)
1069 probe_vectors,
1070 probe_vector_norms,
-> 1071 *args,
1072 )
1073
~/Code/gpytorch/gpytorch/functions/_inv_quad_log_det.py in forward(ctx, representation_tree, dtype, device, matrix_shape, batch_shape, inv_quad, logdet, probe_vectors, probe_vector_norms, *args)
65 lazy_tsr = ctx.representation_tree(*matrix_args)
66 with torch.no_grad():
---> 67 preconditioner, precond_lt, logdet_correction = lazy_tsr._preconditioner()
68
69 ctx.preconditioner = preconditioner
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _preconditioner(self)
84 )
85 return None, None, None
---> 86 self._init_cache()
87
88 # NOTE: We cannot memoize this precondition closure as it causes a memory leak
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache(self)
107 self._init_cache_for_constant_diag(eye, batch_shape, n, k)
108 else:
--> 109 self._init_cache_for_non_constant_diag(eye, batch_shape, n)
110
111 self._precond_lt = PsdSumLazyTensor(RootLazyTensor(self._piv_chol_self), self._diag_tensor)
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache_for_non_constant_diag(self, eye, batch_shape, n)
125 # With non-constant diagonals, we cant factor out the noise as easily
126 # eye = eye.expand(*batch_shape, -1, -1)
--> 127 self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye)))
128 self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()
129
RuntimeError: Tensors must have same number of dimensions: got 3 and 2
|
RuntimeError
|
def _init_cache_for_non_constant_diag(self, eye, batch_shape, n):
# With non-constant diagonals, we cant factor out the noise as easily
self._q_cache, self._r_cache = torch.qr(
torch.cat((self._piv_chol_self / self._noise.sqrt(), eye), dim=-2)
)
self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()
# Use the matrix determinant lemma for the logdet, using the fact that R'R = L_k'L_k + s*I
logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)
logdet -= (1.0 / self._noise).log().sum([-1, -2])
self._precond_logdet_cache = (
logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze()
)
|
def _init_cache_for_non_constant_diag(self, eye, batch_shape, n):
# With non-constant diagonals, we cant factor out the noise as easily
self._q_cache, self._r_cache = torch.qr(
torch.cat((self._piv_chol_self / self._noise.sqrt(), eye))
)
self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()
logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)
logdet -= (1.0 / self._noise).log().sum([-1, -2])
self._precond_logdet_cache = (
logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze()
)
|
https://github.com/cornellius-gp/gpytorch/issues/1298
|
RuntimeError Traceback (most recent call last)
<ipython-input-4-9e151e2de37a> in <module>
24 with gpytorch.settings.max_cholesky_size(100), gpytorch.settings.min_preconditioning_size(100):
25 train_dist = gp(train_x)
---> 26 loss = -mll(train_dist, train_y).sum()
~/Code/gpytorch/gpytorch/module.py in __call__(self, *inputs, **kwargs)
26
27 def __call__(self, *inputs, **kwargs):
---> 28 outputs = self.forward(*inputs, **kwargs)
29 if isinstance(outputs, list):
30 return [_validate_module_outputs(output) for output in outputs]
~/Code/gpytorch/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, function_dist, target, *params)
49 # Get the log prob of the marginal distribution
50 output = self.likelihood(function_dist, *params)
---> 51 res = output.log_prob(target)
52
53 # Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models)
~/Code/gpytorch/gpytorch/distributions/multivariate_normal.py in log_prob(self, value)
138
139 # Get log determininat and first part of quadratic form
--> 140 inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
141
142 res = -0.5 * sum([inv_quad, logdet, diff.size(-1) * math.log(2 * math.pi)])
~/Code/gpytorch/gpytorch/lazy/lazy_tensor.py in inv_quad_logdet(self, inv_quad_rhs, logdet, reduce_inv_quad)
1069 probe_vectors,
1070 probe_vector_norms,
-> 1071 *args,
1072 )
1073
~/Code/gpytorch/gpytorch/functions/_inv_quad_log_det.py in forward(ctx, representation_tree, dtype, device, matrix_shape, batch_shape, inv_quad, logdet, probe_vectors, probe_vector_norms, *args)
65 lazy_tsr = ctx.representation_tree(*matrix_args)
66 with torch.no_grad():
---> 67 preconditioner, precond_lt, logdet_correction = lazy_tsr._preconditioner()
68
69 ctx.preconditioner = preconditioner
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _preconditioner(self)
84 )
85 return None, None, None
---> 86 self._init_cache()
87
88 # NOTE: We cannot memoize this precondition closure as it causes a memory leak
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache(self)
107 self._init_cache_for_constant_diag(eye, batch_shape, n, k)
108 else:
--> 109 self._init_cache_for_non_constant_diag(eye, batch_shape, n)
110
111 self._precond_lt = PsdSumLazyTensor(RootLazyTensor(self._piv_chol_self), self._diag_tensor)
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache_for_non_constant_diag(self, eye, batch_shape, n)
125 # With non-constant diagonals, we cant factor out the noise as easily
126 # eye = eye.expand(*batch_shape, -1, -1)
--> 127 self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye)))
128 self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()
129
RuntimeError: Tensors must have same number of dimensions: got 3 and 2
|
RuntimeError
|
def __init__(self, a, b, sigma=0.01, validate_args=False, transform=None):
TModule.__init__(self)
_a = torch.tensor(float(a)) if isinstance(a, Number) else a
_a = _a.view(-1) if _a.dim() < 1 else _a
_a, _b, _sigma = broadcast_all(_a, b, sigma)
if not torch.all(constraints.less_than(_b).check(_a)):
raise ValueError("must have that a < b (element-wise)")
# TODO: Proper argument validation including broadcasting
batch_shape, event_shape = _a.shape[:-1], _a.shape[-1:]
# need to assign values before registering as buffers to make argument validation work
self.a, self.b, self.sigma = _a, _b, _sigma
super(SmoothedBoxPrior, self).__init__(
batch_shape, event_shape, validate_args=validate_args
)
# now need to delete to be able to register buffer
del self.a, self.b, self.sigma
self.register_buffer("a", _a)
self.register_buffer("b", _b)
self.register_buffer("sigma", _sigma.clone())
self.tails = NormalPrior(torch.zeros_like(_a), _sigma, validate_args=validate_args)
self._transform = transform
|
def __init__(self, a, b, sigma=0.01, validate_args=False, transform=None):
TModule.__init__(self)
_a = torch.tensor(float(a)) if isinstance(a, Number) else a
_a = _a.view(-1) if _a.dim() < 1 else _a
_a, _b, _sigma = broadcast_all(_a, b, sigma)
if not torch.all(constraints.less_than(_b).check(_a)):
raise ValueError("must have that a < b (element-wise)")
# TODO: Proper argument validation including broadcasting
batch_shape, event_shape = _a.shape[:-1], _a.shape[-1:]
# need to assign values before registering as buffers to make argument validation work
self.a, self.b, self.sigma = _a, _b, _sigma
super(SmoothedBoxPrior, self).__init__(
batch_shape, event_shape, validate_args=validate_args
)
# now need to delete to be able to register buffer
del self.a, self.b, self.sigma
self.register_buffer("a", _a)
self.register_buffer("b", _b)
self.register_buffer("sigma", _sigma)
self.tails = NormalPrior(torch.zeros_like(_a), _sigma, validate_args=validate_args)
self._transform = transform
|
https://github.com/cornellius-gp/gpytorch/issues/1164
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-5-6b4b2e881beb> in <module>
2 import gpytorch
3 pr = gpytorch.priors.SmoothedBoxPrior(torch.zeros(2), torch.ones(2))
----> 4 pr.load_state_dict(pr.state_dict())
<...PATH..>/torch/nn/modules/module.py in load_state_dict(self, state_dict, strict)
877 if len(error_msgs) > 0:
878 raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
--> 879 self.__class__.__name__, "\n\t".join(error_msgs)))
880 return _IncompatibleKeys(missing_keys, unexpected_keys)
881
RuntimeError: Error(s) in loading state_dict for SmoothedBoxPrior:
While copying the parameter named "sigma", whose dimensions in the model are torch.Size([2]) and whose dimensions in the checkpoint are torch.Size([2]), an exception occured : ('unsupported operation: more than one element of the written-to tensor refers to a single memory location. Please clone() the tensor before performing the operation.',).
|
RuntimeError
|
def _expand_batch(self, batch_shape):
batch_dim = self.cat_dim + 2
if batch_dim < 0:
if batch_shape[batch_dim] != self.batch_shape[batch_dim]:
raise RuntimeError(
f"Trying to expand a CatLazyTensor in dimension {self.cat_dim}, but this is the concatenated "
f"dimension.\nCurrent shape: {self.shape} - expanded shape: {batch_shape + self.matrix_shape}."
)
lazy_tensors = []
for lazy_tensor in self.lazy_tensors:
sub_batch_shape = list(batch_shape).copy()
sub_batch_shape[batch_dim] = lazy_tensor.shape[self.cat_dim]
lazy_tensors.append(lazy_tensor._expand_batch(sub_batch_shape))
else:
lazy_tensors = [
lazy_tensor._expand_batch(batch_shape) for lazy_tensor in self.lazy_tensors
]
res = self.__class__(
*lazy_tensors, dim=self.cat_dim, output_device=self.output_device
)
return res
|
def _expand_batch(self, batch_shape):
lazy_tensors = [
lazy_tensor._expand_batch(batch_shape) for lazy_tensor in self.lazy_tensors
]
res = self.__class__(
*lazy_tensors, dim=self.cat_dim, output_device=self.output_device
)
return res
|
https://github.com/cornellius-gp/gpytorch/issues/1133
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-16-0d626e17adaf> in <module>
----> 1 cmtlt + torch.randn(100, 100)
~/Documents/GitHub/gpytorch/gpytorch/lazy/lazy_tensor.py in __add__(self, other)
1625 other = lazify(other)
1626 shape = _mul_broadcast_shape(self.shape, other.shape)
-> 1627 return SumLazyTensor(self.expand(shape), other.expand(shape))
1628 else:
1629 return SumLazyTensor(self, other)
~/Documents/GitHub/gpytorch/gpytorch/lazy/lazy_tensor.py in expand(self, *sizes)
857 raise RuntimeError("Invalid arguments {} to expand.".format(sizes))
858
--> 859 res = self._expand_batch(batch_shape=shape[:-2])
860 return res
861
~/Documents/GitHub/gpytorch/gpytorch/lazy/constant_mul_lazy_tensor.py in _expand_batch(self, batch_shape)
70 def _expand_batch(self, batch_shape):
---> 71 return self.__class__(self.base_lazy_tensor._expand_batch(batch_shape), self._constant.expand(*batch_shape))
72
73 def _get_indices(self, row_index, col_index, *batch_indices):
TypeError: expand() missing 1 required positional arguments: "size"
|
TypeError
|
def _expand_batch(self, batch_shape):
return self.__class__(
self.base_lazy_tensor._expand_batch(batch_shape),
self._constant.expand(*batch_shape) if len(batch_shape) else self._constant,
)
|
def _expand_batch(self, batch_shape):
return self.__class__(
self.base_lazy_tensor._expand_batch(batch_shape),
self._constant.expand(*batch_shape),
)
|
https://github.com/cornellius-gp/gpytorch/issues/1133
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-16-0d626e17adaf> in <module>
----> 1 cmtlt + torch.randn(100, 100)
~/Documents/GitHub/gpytorch/gpytorch/lazy/lazy_tensor.py in __add__(self, other)
1625 other = lazify(other)
1626 shape = _mul_broadcast_shape(self.shape, other.shape)
-> 1627 return SumLazyTensor(self.expand(shape), other.expand(shape))
1628 else:
1629 return SumLazyTensor(self, other)
~/Documents/GitHub/gpytorch/gpytorch/lazy/lazy_tensor.py in expand(self, *sizes)
857 raise RuntimeError("Invalid arguments {} to expand.".format(sizes))
858
--> 859 res = self._expand_batch(batch_shape=shape[:-2])
860 return res
861
~/Documents/GitHub/gpytorch/gpytorch/lazy/constant_mul_lazy_tensor.py in _expand_batch(self, batch_shape)
70 def _expand_batch(self, batch_shape):
---> 71 return self.__class__(self.base_lazy_tensor._expand_batch(batch_shape), self._constant.expand(*batch_shape))
72
73 def _get_indices(self, row_index, col_index, *batch_indices):
TypeError: expand() missing 1 required positional arguments: "size"
|
TypeError
|
def __add__(self, other):
"""
Return a :obj:`gpytorch.lazy.LazyTensor` that represents the sum of this lazy tensor and another matrix
or lazy tensor.
Args:
:attr:`other` (:obj:`torch.tensor` or :obj:`gpytorch.lazy.LazyTensor`):
Matrix to add to this one.
Returns:
:obj:`gpytorch.lazy.SumLazyTensor`:
A sum lazy tensor representing the sum of this lazy tensor and other.
"""
from .sum_lazy_tensor import SumLazyTensor
from .zero_lazy_tensor import ZeroLazyTensor
from .diag_lazy_tensor import DiagLazyTensor
from .added_diag_lazy_tensor import AddedDiagLazyTensor
from .non_lazy_tensor import lazify
from torch import Tensor
if isinstance(other, ZeroLazyTensor):
return self
elif isinstance(other, DiagLazyTensor):
return AddedDiagLazyTensor(self, other)
elif isinstance(other, Tensor):
other = lazify(other)
shape = _mul_broadcast_shape(self.shape, other.shape)
new_self = self if self.shape == shape else self._expand_batch(shape[:-2])
new_other = other if other.shape == shape else other._expand_batch(shape[:-2])
return SumLazyTensor(new_self, new_other)
else:
return SumLazyTensor(self, other)
|
def __add__(self, other):
"""
Return a :obj:`gpytorch.lazy.LazyTensor` that represents the sum of this lazy tensor and another matrix
or lazy tensor.
Args:
:attr:`other` (:obj:`torch.tensor` or :obj:`gpytorch.lazy.LazyTensor`):
Matrix to add to this one.
Returns:
:obj:`gpytorch.lazy.SumLazyTensor`:
A sum lazy tensor representing the sum of this lazy tensor and other.
"""
from .sum_lazy_tensor import SumLazyTensor
from .zero_lazy_tensor import ZeroLazyTensor
from .diag_lazy_tensor import DiagLazyTensor
from .added_diag_lazy_tensor import AddedDiagLazyTensor
from .non_lazy_tensor import lazify
from torch import Tensor
if isinstance(other, ZeroLazyTensor):
return self
elif isinstance(other, DiagLazyTensor):
return AddedDiagLazyTensor(self, other)
elif isinstance(other, Tensor):
other = lazify(other)
shape = _mul_broadcast_shape(self.shape, other.shape)
return SumLazyTensor(self.expand(shape), other.expand(shape))
else:
return SumLazyTensor(self, other)
|
https://github.com/cornellius-gp/gpytorch/issues/1133
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-16-0d626e17adaf> in <module>
----> 1 cmtlt + torch.randn(100, 100)
~/Documents/GitHub/gpytorch/gpytorch/lazy/lazy_tensor.py in __add__(self, other)
1625 other = lazify(other)
1626 shape = _mul_broadcast_shape(self.shape, other.shape)
-> 1627 return SumLazyTensor(self.expand(shape), other.expand(shape))
1628 else:
1629 return SumLazyTensor(self, other)
~/Documents/GitHub/gpytorch/gpytorch/lazy/lazy_tensor.py in expand(self, *sizes)
857 raise RuntimeError("Invalid arguments {} to expand.".format(sizes))
858
--> 859 res = self._expand_batch(batch_shape=shape[:-2])
860 return res
861
~/Documents/GitHub/gpytorch/gpytorch/lazy/constant_mul_lazy_tensor.py in _expand_batch(self, batch_shape)
70 def _expand_batch(self, batch_shape):
---> 71 return self.__class__(self.base_lazy_tensor._expand_batch(batch_shape), self._constant.expand(*batch_shape))
72
73 def _get_indices(self, row_index, col_index, *batch_indices):
TypeError: expand() missing 1 required positional arguments: "size"
|
TypeError
|
def __add__(self, other):
from .diag_lazy_tensor import DiagLazyTensor
from .added_diag_lazy_tensor import AddedDiagLazyTensor
if isinstance(other, ZeroLazyTensor):
return self
elif isinstance(other, DiagLazyTensor):
return AddedDiagLazyTensor(self, other)
elif isinstance(other, SumLazyTensor):
return SumLazyTensor(*(list(self.lazy_tensors) + list(other.lazy_tensors)))
elif isinstance(other, LazyTensor):
return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
elif isinstance(other, Tensor):
# get broadcast shape, assuming mul broadcasting the same as add broadcasting
broadcasted_shape = _mul_broadcast_shape(self.shape, other.shape)
# lazify + broadcast other
broadcasted_other = lazify(other.expand(broadcasted_shape))
# update the lazy tensors' shape as well
new_self = (
self
if broadcasted_shape == self.shape
else self._expand_batch(broadcasted_shape[:-2])
)
return SumLazyTensor(*(list(new_self.lazy_tensors) + [broadcasted_other]))
else:
raise AttributeError("other must be a LazyTensor")
|
def __add__(self, other):
from .diag_lazy_tensor import DiagLazyTensor
from .added_diag_lazy_tensor import AddedDiagLazyTensor
if isinstance(other, ZeroLazyTensor):
return self
elif isinstance(other, DiagLazyTensor):
return AddedDiagLazyTensor(self, other)
elif isinstance(other, SumLazyTensor):
return SumLazyTensor(*(list(self.lazy_tensors) + list(other.lazy_tensors)))
elif isinstance(other, LazyTensor):
return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
elif isinstance(other, Tensor):
# get broadcast shape, assuming mul broadcasting the same as add broadcasting
broadcasted_shape = _mul_broadcast_shape(self.shape, other.shape)
# lazify + broadcast other
broadcasted_other = lazify(other.expand(broadcasted_shape))
# update the lazy tensors' shape as well
if broadcasted_shape != self.shape:
broadcasted_lts = [
lt.expand(*broadcasted_shape, 1).squeeze(-1).transpose(-1, -2)
for lt in self.lazy_tensors
]
else:
broadcasted_lts = list(self.lazy_tensors)
return SumLazyTensor(*(broadcasted_lts + [broadcasted_other]))
else:
raise AttributeError("other must be a LazyTensor")
|
https://github.com/cornellius-gp/gpytorch/issues/1133
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-16-0d626e17adaf> in <module>
----> 1 cmtlt + torch.randn(100, 100)
~/Documents/GitHub/gpytorch/gpytorch/lazy/lazy_tensor.py in __add__(self, other)
1625 other = lazify(other)
1626 shape = _mul_broadcast_shape(self.shape, other.shape)
-> 1627 return SumLazyTensor(self.expand(shape), other.expand(shape))
1628 else:
1629 return SumLazyTensor(self, other)
~/Documents/GitHub/gpytorch/gpytorch/lazy/lazy_tensor.py in expand(self, *sizes)
857 raise RuntimeError("Invalid arguments {} to expand.".format(sizes))
858
--> 859 res = self._expand_batch(batch_shape=shape[:-2])
860 return res
861
~/Documents/GitHub/gpytorch/gpytorch/lazy/constant_mul_lazy_tensor.py in _expand_batch(self, batch_shape)
70 def _expand_batch(self, batch_shape):
---> 71 return self.__class__(self.base_lazy_tensor._expand_batch(batch_shape), self._constant.expand(*batch_shape))
72
73 def _get_indices(self, row_index, col_index, *batch_indices):
TypeError: expand() missing 1 required positional arguments: "size"
|
TypeError
|
def initialize_from_data_empspect(self, train_x, train_y):
"""
Initialize mixture components based on the empirical spectrum of the data.
This will often be better than the standard initialize_from_data method.
"""
import numpy as np
from scipy.fftpack import fft
from scipy.integrate import cumtrapz
if not torch.is_tensor(train_x) or not torch.is_tensor(train_y):
raise RuntimeError("train_x and train_y should be tensors")
if train_x.ndimension() == 1:
train_x = train_x.unsqueeze(-1)
N = train_x.size(-2)
emp_spect = np.abs(fft(train_y.cpu().detach().numpy())) ** 2 / N
M = math.floor(N / 2)
freq1 = np.arange(M + 1)
freq2 = np.arange(-M + 1, 0)
freq = np.hstack((freq1, freq2)) / N
freq = freq[: M + 1]
emp_spect = emp_spect[: M + 1]
total_area = np.trapz(emp_spect, freq)
spec_cdf = np.hstack((np.zeros(1), cumtrapz(emp_spect, freq)))
spec_cdf = spec_cdf / total_area
a = np.random.rand(1000, self.ard_num_dims)
p, q = np.histogram(a, spec_cdf)
bins = np.digitize(a, q)
slopes = (spec_cdf[bins] - spec_cdf[bins - 1]) / (freq[bins] - freq[bins - 1])
intercepts = spec_cdf[bins - 1] - slopes * freq[bins - 1]
inv_spec = (a - intercepts) / slopes
from sklearn.mixture import GaussianMixture
GMM = GaussianMixture(n_components=self.num_mixtures, covariance_type="diag").fit(
inv_spec
)
means = GMM.means_
varz = GMM.covariances_
weights = GMM.weights_
self.mixture_means = means
self.mixture_scales = varz
self.mixture_weights = weights
|
def initialize_from_data_empspect(self, train_x, train_y):
"""
Initialize mixture components based on the empirical spectrum of the data.
This will often be better than the standard initialize_from_data method.
"""
import numpy as np
from scipy.fftpack import fft
from scipy.integrate import cumtrapz
N = train_x.size(-2)
emp_spect = np.abs(fft(train_y.cpu().detach().numpy())) ** 2 / N
M = math.floor(N / 2)
freq1 = np.arange(M + 1)
freq2 = np.arange(-M + 1, 0)
freq = np.hstack((freq1, freq2)) / N
freq = freq[: M + 1]
emp_spect = emp_spect[: M + 1]
total_area = np.trapz(emp_spect, freq)
spec_cdf = np.hstack((np.zeros(1), cumtrapz(emp_spect, freq)))
spec_cdf = spec_cdf / total_area
a = np.random.rand(1000, self.ard_num_dims)
p, q = np.histogram(a, spec_cdf)
bins = np.digitize(a, q)
slopes = (spec_cdf[bins] - spec_cdf[bins - 1]) / (freq[bins] - freq[bins - 1])
intercepts = spec_cdf[bins - 1] - slopes * freq[bins - 1]
inv_spec = (a - intercepts) / slopes
from sklearn.mixture import GaussianMixture
GMM = GaussianMixture(n_components=self.num_mixtures, covariance_type="diag").fit(
inv_spec
)
means = GMM.means_
varz = GMM.covariances_
weights = GMM.weights_
self.mixture_means = means
self.mixture_scales = varz
self.mixture_weights = weights
|
https://github.com/cornellius-gp/gpytorch/issues/1166
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-18-3796fe5ce2f5> in <module>
19
20 likelihood = gpytorch.likelihoods.GaussianLikelihood()
---> 21 model = ExactGPModel(train_x, train_y, likelihood)
<ipython-input-5-6b823c7240a5> in __init__(self, train_x, train_y, likelihood)
7 self.covar_module = gpytorch.kernels.SpectralMixtureKernel(num_mixtures=4)
8 #self.covar_module.initialize_from_data(train_x, train_y)
----> 9 self.covar_module.initialize_from_data_empspect(train_x, train_y)
10
11 def forward(self, x):
~/miniconda3/envs/GP/lib/python3.7/site-packages/gpytorch/kernels/spectral_mixture_kernel.py in initialize_from_data_empspect(self, train_x, train_y)
159 from scipy.integrate import cumtrapz
160
--> 161 N = train_x.size(-2)
162 emp_spect = np.abs(fft(train_y.cpu().detach().numpy())) ** 2 / N
163 M = math.floor(N / 2)
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)
|
IndexError
|
def _getitem(self, row_index, col_index, *batch_indices):
"""
Supports subindexing of the matrix this LazyTensor represents.
The indices passed into this method will either be:
Tensor indices
Slices
..note::
LazyTensor.__getitem__ uses this as a helper method. If you are writing your own custom LazyTensor,
override this method rather than __getitem__ (so that you don't have to repeat the extra work)
..note::
This method is used internally by the related function :func:`~gpytorch.lazy.LazyTensor.__getitem__`,
which does some additional work. Calling this method directly is discouraged.
This method has a number of restrictions on the type of arguments that are passed in to reduce
the complexity of __getitem__ calls in PyTorch. In particular:
- This method only accepts slices and tensors for the row/column indices (no ints)
- The row and column dimensions don't dissapear (e.g. from Tensor indexing). These cases are
handled by the `_getindices` method
Args:
:attr:`row_index` (slice, Tensor):
Index for the row of the LazyTensor
:attr:`col_index` (slice, Tensor):
Index for the col of the LazyTensor
:attr:`batch_indices` (tuple of slice, int, Tensor):
Indices for the batch dimensions
Returns:
`LazyTensor`
"""
# Special case: if both row and col are not indexed, then we are done
if _is_noop_index(row_index) and _is_noop_index(col_index):
if len(batch_indices):
components = [component[batch_indices] for component in self._args]
res = self.__class__(*components, **self._kwargs)
return res
else:
return self
# Normal case: we have to do some processing on either the rows or columns
# We will handle this through "interpolation"
row_interp_indices = torch.arange(
0, self.size(-2), dtype=torch.long, device=self.device
).view(-1, 1)
row_interp_indices = row_interp_indices.expand(*self.batch_shape, -1, 1)
row_interp_values = torch.tensor(
1.0, dtype=self.dtype, device=self.device
).expand_as(row_interp_indices)
col_interp_indices = torch.arange(
0, self.size(-1), dtype=torch.long, device=self.device
).view(-1, 1)
col_interp_indices = col_interp_indices.expand(*self.batch_shape, -1, 1)
col_interp_values = torch.tensor(
1.0, dtype=self.dtype, device=self.device
).expand_as(col_interp_indices)
# Construct interpolated LazyTensor
from . import InterpolatedLazyTensor
res = InterpolatedLazyTensor(
self,
row_interp_indices,
row_interp_values,
col_interp_indices,
col_interp_values,
)
return res._getitem(row_index, col_index, *batch_indices)
|
def _getitem(self, row_index, col_index, *batch_indices):
"""
Supports subindexing of the matrix this LazyTensor represents.
The indices passed into this method will either be:
Tensor indices
Slices
..note::
LazyTensor.__getitem__ uses this as a helper method. If you are writing your own custom LazyTensor,
override this method rather than __getitem__ (so that you don't have to repeat the extra work)
..note::
This method is used internally by the related function :func:`~gpytorch.lazy.LazyTensor.__getitem__`,
which does some additional work. Calling this method directly is discouraged.
This method has a number of restrictions on the type of arguments that are passed in to reduce
the complexity of __getitem__ calls in PyTorch. In particular:
- This method only accepts slices and tensors for the row/column indices (no ints)
- The row and column dimensions don't dissapear (e.g. from Tensor indexing). These cases are
handled by the `_getindices` method
Args:
:attr:`row_index` (slice, Tensor):
Index for the row of the LazyTensor
:attr:`col_index` (slice, Tensor):
Index for the col of the LazyTensor
:attr:`batch_indices` (tuple of slice, int, Tensor):
Indices for the batch dimensions
Returns:
`LazyTensor`
"""
# Special case: if both row and col are not indexed, then we are done
if row_index is _noop_index and col_index is _noop_index:
if len(batch_indices):
components = [component[batch_indices] for component in self._args]
res = self.__class__(*components, **self._kwargs)
return res
else:
return self
# Normal case: we have to do some processing on either the rows or columns
# We will handle this through "interpolation"
row_interp_indices = torch.arange(
0, self.size(-2), dtype=torch.long, device=self.device
).view(-1, 1)
row_interp_indices = row_interp_indices.expand(*self.batch_shape, -1, 1)
row_interp_values = torch.tensor(
1.0, dtype=self.dtype, device=self.device
).expand_as(row_interp_indices)
col_interp_indices = torch.arange(
0, self.size(-1), dtype=torch.long, device=self.device
).view(-1, 1)
col_interp_indices = col_interp_indices.expand(*self.batch_shape, -1, 1)
col_interp_values = torch.tensor(
1.0, dtype=self.dtype, device=self.device
).expand_as(col_interp_indices)
# Construct interpolated LazyTensor
from . import InterpolatedLazyTensor
res = InterpolatedLazyTensor(
self,
row_interp_indices,
row_interp_values,
col_interp_indices,
col_interp_values,
)
return res._getitem(row_index, col_index, *batch_indices)
|
https://github.com/cornellius-gp/gpytorch/issues/1065
|
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py", line 88, in _getitem
x1 = x1[(*batch_indices, row_index, dim_index)]
IndexError: too many indices for tensor of dimension 2
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/johnsonzhong/Research/meta_contact/simulation/temp.py", line 70, in <module>
lower, upper = predictions.confidence_region()
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/distributions/multivariate_normal.py", line 80, in confidence_region
std2 = self.stddev.mul_(2)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/distributions/distribution.py", line 111, in stddev
return self.variance.sqrt()
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/distributions/multitask_multivariate_normal.py", line 219, in variance
var = super().variance
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/distributions/multivariate_normal.py", line 189, in variance
diag = self.lazy_covariance_matrix.diag()
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py", line 96, in diag
return sum(lazy_tensor.diag().contiguous() for lazy_tensor in self.lazy_tensors)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py", line 96, in <genexpr>
return sum(lazy_tensor.diag().contiguous() for lazy_tensor in self.lazy_tensors)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/lazy/interpolated_lazy_tensor.py", line 387, in diag
return super(InterpolatedLazyTensor, self).diag()
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py", line 839, in diag
return self[..., row_col_iter, row_col_iter]
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py", line 1716, in __getitem__
res = self._get_indices(row_index, col_index, *batch_indices)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/lazy/interpolated_lazy_tensor.py", line 110, in _get_indices
*[batch_index.view(*batch_index.shape, 1, 1) for batch_index in batch_indices],
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/lazy/block_interleaved_lazy_tensor.py", line 58, in _get_indices
res = self.base_lazy_tensor._get_indices(row_index, col_index, *batch_indices, row_index_block)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py", line 294, in _get_indices
base_lazy_tensor = self._getitem(_noop_index, _noop_index, *batch_indices)._expand_batch(final_shape)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py", line 97, in _getitem
f"Attempting to tensor index a non-batch matrix's batch dimensions. "
RuntimeError: Attempting to tensor index a non-batch matrix's batch dimensions. Got batch index {batch_indices} but my shape was {self.shape}
|
IndexError
|
def _shaped_noise_covar(self, base_shape: torch.Size, *params: Any, **kwargs: Any):
return self.noise_covar(*params, shape=base_shape, **kwargs)
|
def _shaped_noise_covar(self, base_shape: torch.Size, *params: Any, **kwargs: Any):
if len(params) > 0:
# we can infer the shape from the params
shape = None
else:
# here shape[:-1] is the batch shape requested, and shape[-1] is `n`, the number of points
shape = base_shape
return self.noise_covar(*params, shape=shape, **kwargs)
|
https://github.com/cornellius-gp/gpytorch/issues/1084
|
Traceback (most recent call last):
File "<ipython-input-1-88b9df752e72>", line 1, in <module>
runfile('/home/user/Desktop/dl-dev python/gp_examples/test.py', wdir='/home/user/Desktop/dl-dev python/gp_examples')
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 827, in runfile
execfile(filename, namespace)
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "/home/user/Desktop/dl-dev python/gp_examples/test.py", line 136, in <module>
preds = model(test_x)
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/gpytorch/models/exact_gp.py", line 294, in __call__
likelihood=self.likelihood,
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/gpytorch/models/exact_prediction_strategies.py", line 36, in prediction_strategy
return cls(train_inputs, train_prior_dist, train_labels, likelihood)
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py", line 205, in prediction_strategy
return InterpolatedPredictionStrategy(train_inputs, train_prior_dist, train_labels, likelihood)
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/gpytorch/models/exact_prediction_strategies.py", line 380, in __init__
super().__init__(train_inputs, train_prior_dist, train_labels, likelihood)
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/gpytorch/models/exact_prediction_strategies.py", line 50, in __init__
mvn = self.likelihood(train_prior_dist, train_inputs)
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/gpytorch/likelihoods/likelihood.py", line 313, in __call__
return self.marginal(input, *args, **kwargs)
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/gpytorch/likelihoods/gaussian_likelihood.py", line 76, in marginal
full_covar = covar + noise_covar
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/gpytorch/lazy/lazy_tensor.py", line 1626, in __add__
return AddedDiagLazyTensor(self, other)
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/gpytorch/lazy/added_diag_lazy_tensor.py", line 27, in __init__
broadcasting._mul_broadcast_shape(lazy_tensors[0].shape, lazy_tensors[1].shape)
File "/home/user/anaconda3/envs/dkl/lib/python3.6/site-packages/gpytorch/utils/broadcasting.py", line 20, in _mul_broadcast_shape
raise RuntimeError("Shapes are not broadcastable for mul operation")
RuntimeError: Shapes are not broadcastable for mul operatio
|
RuntimeError
|
def forward(self, x1, x2, diag=False, **params):
x1_ = x1.div(self.period_length)
x2_ = x2.div(self.period_length)
diff = self.covar_dist(x1_, x2_, diag=diag, **params)
res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()
if diag:
res = res.squeeze(0)
return res
|
def forward(self, x1, x2, diag=False, **params):
x1_ = x1.div(self.period_length)
x2_ = x2.div(self.period_length)
diff = self.covar_dist(x1_, x2_, diag=diag, **params)
res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()
if diff.ndimension() == 2 or diag:
res = res.squeeze(0)
return res
|
https://github.com/cornellius-gp/gpytorch/issues/1011
|
Traceback (most recent call last):
File "<ipython-input-13-effc04c4ab77>", line 61, in <module>
loss = -mll(output, train_y) ## NOTE fails here.
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/module.py", line 24, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py", line 51, in forward
res = output.log_prob(target)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/distributions/multivariate_normal.py", line 135, in log_prob
inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py", line 1038, in inv_quad_logdet
args = self.representation()
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py", line 1268, in representation
representation += list(arg.representation())
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py", line 311, in representation
return self.evaluate_kernel().representation()
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/utils/memoize.py", line 34, in g
add_to_cache(self, cache_name, method(self, *args, **kwargs))
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py", line 278, in evaluate_kernel
res = self.kernel(x1, x2, diag=False, last_dim_is_batch=self.last_dim_is_batch, **self.params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/kernel.py", line 395, in __call__
res = super(Kernel, self).__call__(x1_, x2_, last_dim_is_batch=last_dim_is_batch, **params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/module.py", line 24, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/scale_kernel.py", line 90, in forward
orig_output = self.base_kernel.forward(x1, x2, diag=diag, last_dim_is_batch=last_dim_is_batch, **params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_interpolation_kernel.py", line 177, in forward
base_lazy_tsr = lazify(self._inducing_forward(last_dim_is_batch=last_dim_is_batch, **params))
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_interpolation_kernel.py", line 143, in _inducing_forward
return super().forward(self.grid, self.grid, last_dim_is_batch=last_dim_is_batch, **params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_kernel.py", line 133, in forward
covars = [ToeplitzLazyTensor(c.squeeze(-2)) for c in covars]
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_kernel.py", line 133, in <listcomp>
covars = [ToeplitzLazyTensor(c.squeeze(-2)) for c in covars]
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)
|
IndexError
|
def expand(self, expand_shape, _instance=None):
batch_shape = torch.Size(expand_shape)
return HorseshoePrior(self.scale.expand(batch_shape))
|
def expand(self, expand_shape, _instance=None):
new = self._get_checked_instance(HorseshoePrior)
batch_shape = torch.Size(expand_shape)
new.scale = self.scale.expand(batch_shape)
super(Distribution, new).__init__(batch_shape)
new._validate_args = self._validate_args
return new
|
https://github.com/cornellius-gp/gpytorch/issues/976
|
Warmup: 0%| | 0/300 [00:00, ?it/s]
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-22-1dcdc695642b> in <module>
9 nuts_kernel = pyro.infer.mcmc.NUTS(pyro_model, adapt_step_size=True)
10 mcmc_run = pyro.infer.mcmc.MCMC(nuts_kernel, num_samples=100, warmup_steps=200)
---> 11 mcmc_run.run(train_x, train_y)
~/.local/lib/python3.8/site-packages/pyro/poutine/messenger.py in _context_wrap(context, fn, *args, **kwargs)
6 def _context_wrap(context, fn, *args, **kwargs):
7 with context:
----> 8 return fn(*args, **kwargs)
9
10
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in run(self, *args, **kwargs)
352 z_flat_acc = [[] for _ in range(self.num_chains)]
353 with pyro.validation_enabled(not self.disable_validation):
--> 354 for x, chain_id in self.sampler.run(*args, **kwargs):
355 if num_samples[chain_id] == 0:
356 num_samples[chain_id] += 1
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in run(self, *args, **kwargs)
161 logger = initialize_logger(logger, "", progress_bar)
162 hook_w_logging = _add_logging_hook(logger, progress_bar, self.hook)
--> 163 for sample in _gen_samples(self.kernel, self.warmup_steps, self.num_samples, hook_w_logging,
164 i if self.num_chains > 1 else None,
165 *args, **kwargs):
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in _gen_samples(kernel, warmup_steps, num_samples, hook, chain_id, *args, **kwargs)
105
106 def _gen_samples(kernel, warmup_steps, num_samples, hook, chain_id, *args, **kwargs):
--> 107 kernel.setup(warmup_steps, *args, **kwargs)
108 params = kernel.initial_params
109 # yield structure (key, value.shape) of params
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/hmc.py in setup(self, warmup_steps, *args, **kwargs)
258 self._warmup_steps = warmup_steps
259 if self.model is not None:
--> 260 self._initialize_model_properties(args, kwargs)
261 potential_energy = self.potential_fn(self.initial_params)
262 self._cache(self.initial_params, potential_energy, None)
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/hmc.py in _initialize_model_properties(self, model_args, model_kwargs)
223
224 def _initialize_model_properties(self, model_args, model_kwargs):
--> 225 init_params, potential_fn, transforms, trace = initialize_model(
226 self.model,
227 model_args,
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/util.py in initialize_model(model, model_args, model_kwargs, transforms, max_plate_nesting, jit_compile, jit_options, skip_jit_warnings, num_chains)
363 automatic_transform_enabled = False
364 if max_plate_nesting is None:
--> 365 max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)
366 # Wrap model in `poutine.enum` to enumerate over discrete latent sites.
367 # No-op if model does not have any discrete latents.
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/util.py in _guess_max_plate_nesting(model, args, kwargs)
231 """
232 with poutine.block():
--> 233 model_trace = poutine.trace(model).get_trace(*args, **kwargs)
234 sites = [site for site in model_trace.nodes.values()
235 if site["type"] == "sample"]
~/.local/lib/python3.8/site-packages/pyro/poutine/trace_messenger.py in get_trace(self, *args, **kwargs)
161 Calls this poutine and returns its trace instead of the function's return value.
162 """
--> 163 self(*args, **kwargs)
164 return self.msngr.get_trace()
~/.local/lib/python3.8/site-packages/pyro/poutine/trace_messenger.py in __call__(self, *args, **kwargs)
141 args=args, kwargs=kwargs)
142 try:
--> 143 ret = self.fn(*args, **kwargs)
144 except (ValueError, RuntimeError):
145 exc_type, exc_value, traceback = sys.exc_info()
<ipython-input-22-1dcdc695642b> in pyro_model(x, y)
2
3 def pyro_model(x, y):
----> 4 model.pyro_sample_from_prior()
5 output = model(x)
6 loss = mll.pyro_factor(output, y)
~/.local/lib/python3.8/site-packages/gpytorch/module.py in pyro_sample_from_prior(self)
287 parameters of the model that have GPyTorch priors registered to them.
288 """
--> 289 return _pyro_sample_from_prior(module=self, memo=None, prefix="")
290
291 def local_load_samples(self, samples_dict, memo, prefix):
~/.local/lib/python3.8/site-packages/gpytorch/module.py in _pyro_sample_from_prior(module, memo, prefix)
396 for mname, module_ in module.named_children():
397 submodule_prefix = prefix + ("." if prefix else "") + mname
--> 398 _pyro_sample_from_prior(module=module_, memo=memo, prefix=submodule_prefix)
399
400
~/.local/lib/python3.8/site-packages/gpytorch/module.py in _pyro_sample_from_prior(module, memo, prefix)
391 memo.add(prior)
392 prior = prior.expand(closure().shape)
--> 393 value = pyro.sample(prefix + ("." if prefix else "") + prior_name, prior)
394 setting_closure(value)
395
~/.local/lib/python3.8/site-packages/pyro/primitives.py in sample(name, fn, *args, **kwargs)
108 msg["is_observed"] = True
109 # apply the stack and return its return value
--> 110 apply_stack(msg)
111 return msg["value"]
112
~/.local/lib/python3.8/site-packages/pyro/poutine/runtime.py in apply_stack(initial_msg)
193 break
194
--> 195 default_process_message(msg)
196
197 for frame in stack[-pointer:]:
~/.local/lib/python3.8/site-packages/pyro/poutine/runtime.py in default_process_message(msg)
154 return msg
155
--> 156 msg["value"] = msg["fn"](*msg["args"], **msg["kwargs"])
157
158 # after fn has been called, update msg to prevent it from being called again.
~/.local/lib/python3.8/site-packages/pyro/distributions/torch_distribution.py in __call__(self, sample_shape)
38 :rtype: torch.Tensor
39 """
---> 40 return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)
41
42 @property
/usr/lib/python3.8/site-packages/torch/distributions/gamma.py in rsample(self, sample_shape)
59 def rsample(self, sample_shape=torch.Size()):
60 shape = self._extended_shape(sample_shape)
---> 61 value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(shape)
62 value.detach().clamp_(min=torch.finfo(value.dtype).tiny) # do not record in autograd graph
63 return value
/usr/lib/python3.8/site-packages/torch/nn/modules/module.py in __getattr__(self, name)
582 if name in modules:
583 return modules[name]
--> 584 raise AttributeError("'{}' object has no attribute '{}'".format(
585 type(self).__name__, name))
586
AttributeError: 'GammaPrior' object has no attribute 'concentration'
|
AttributeError
|
def expand(self, batch_shape):
batch_shape = torch.Size(batch_shape)
return NormalPrior(self.loc.expand(batch_shape), self.scale.expand(batch_shape))
|
def expand(self, batch_shape):
return Normal.expand(self, batch_shape, _instance=self)
|
https://github.com/cornellius-gp/gpytorch/issues/976
|
Warmup: 0%| | 0/300 [00:00, ?it/s]
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-22-1dcdc695642b> in <module>
9 nuts_kernel = pyro.infer.mcmc.NUTS(pyro_model, adapt_step_size=True)
10 mcmc_run = pyro.infer.mcmc.MCMC(nuts_kernel, num_samples=100, warmup_steps=200)
---> 11 mcmc_run.run(train_x, train_y)
~/.local/lib/python3.8/site-packages/pyro/poutine/messenger.py in _context_wrap(context, fn, *args, **kwargs)
6 def _context_wrap(context, fn, *args, **kwargs):
7 with context:
----> 8 return fn(*args, **kwargs)
9
10
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in run(self, *args, **kwargs)
352 z_flat_acc = [[] for _ in range(self.num_chains)]
353 with pyro.validation_enabled(not self.disable_validation):
--> 354 for x, chain_id in self.sampler.run(*args, **kwargs):
355 if num_samples[chain_id] == 0:
356 num_samples[chain_id] += 1
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in run(self, *args, **kwargs)
161 logger = initialize_logger(logger, "", progress_bar)
162 hook_w_logging = _add_logging_hook(logger, progress_bar, self.hook)
--> 163 for sample in _gen_samples(self.kernel, self.warmup_steps, self.num_samples, hook_w_logging,
164 i if self.num_chains > 1 else None,
165 *args, **kwargs):
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in _gen_samples(kernel, warmup_steps, num_samples, hook, chain_id, *args, **kwargs)
105
106 def _gen_samples(kernel, warmup_steps, num_samples, hook, chain_id, *args, **kwargs):
--> 107 kernel.setup(warmup_steps, *args, **kwargs)
108 params = kernel.initial_params
109 # yield structure (key, value.shape) of params
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/hmc.py in setup(self, warmup_steps, *args, **kwargs)
258 self._warmup_steps = warmup_steps
259 if self.model is not None:
--> 260 self._initialize_model_properties(args, kwargs)
261 potential_energy = self.potential_fn(self.initial_params)
262 self._cache(self.initial_params, potential_energy, None)
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/hmc.py in _initialize_model_properties(self, model_args, model_kwargs)
223
224 def _initialize_model_properties(self, model_args, model_kwargs):
--> 225 init_params, potential_fn, transforms, trace = initialize_model(
226 self.model,
227 model_args,
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/util.py in initialize_model(model, model_args, model_kwargs, transforms, max_plate_nesting, jit_compile, jit_options, skip_jit_warnings, num_chains)
363 automatic_transform_enabled = False
364 if max_plate_nesting is None:
--> 365 max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)
366 # Wrap model in `poutine.enum` to enumerate over discrete latent sites.
367 # No-op if model does not have any discrete latents.
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/util.py in _guess_max_plate_nesting(model, args, kwargs)
231 """
232 with poutine.block():
--> 233 model_trace = poutine.trace(model).get_trace(*args, **kwargs)
234 sites = [site for site in model_trace.nodes.values()
235 if site["type"] == "sample"]
~/.local/lib/python3.8/site-packages/pyro/poutine/trace_messenger.py in get_trace(self, *args, **kwargs)
161 Calls this poutine and returns its trace instead of the function's return value.
162 """
--> 163 self(*args, **kwargs)
164 return self.msngr.get_trace()
~/.local/lib/python3.8/site-packages/pyro/poutine/trace_messenger.py in __call__(self, *args, **kwargs)
141 args=args, kwargs=kwargs)
142 try:
--> 143 ret = self.fn(*args, **kwargs)
144 except (ValueError, RuntimeError):
145 exc_type, exc_value, traceback = sys.exc_info()
<ipython-input-22-1dcdc695642b> in pyro_model(x, y)
2
3 def pyro_model(x, y):
----> 4 model.pyro_sample_from_prior()
5 output = model(x)
6 loss = mll.pyro_factor(output, y)
~/.local/lib/python3.8/site-packages/gpytorch/module.py in pyro_sample_from_prior(self)
287 parameters of the model that have GPyTorch priors registered to them.
288 """
--> 289 return _pyro_sample_from_prior(module=self, memo=None, prefix="")
290
291 def local_load_samples(self, samples_dict, memo, prefix):
~/.local/lib/python3.8/site-packages/gpytorch/module.py in _pyro_sample_from_prior(module, memo, prefix)
396 for mname, module_ in module.named_children():
397 submodule_prefix = prefix + ("." if prefix else "") + mname
--> 398 _pyro_sample_from_prior(module=module_, memo=memo, prefix=submodule_prefix)
399
400
~/.local/lib/python3.8/site-packages/gpytorch/module.py in _pyro_sample_from_prior(module, memo, prefix)
391 memo.add(prior)
392 prior = prior.expand(closure().shape)
--> 393 value = pyro.sample(prefix + ("." if prefix else "") + prior_name, prior)
394 setting_closure(value)
395
~/.local/lib/python3.8/site-packages/pyro/primitives.py in sample(name, fn, *args, **kwargs)
108 msg["is_observed"] = True
109 # apply the stack and return its return value
--> 110 apply_stack(msg)
111 return msg["value"]
112
~/.local/lib/python3.8/site-packages/pyro/poutine/runtime.py in apply_stack(initial_msg)
193 break
194
--> 195 default_process_message(msg)
196
197 for frame in stack[-pointer:]:
~/.local/lib/python3.8/site-packages/pyro/poutine/runtime.py in default_process_message(msg)
154 return msg
155
--> 156 msg["value"] = msg["fn"](*msg["args"], **msg["kwargs"])
157
158 # after fn has been called, update msg to prevent it from being called again.
~/.local/lib/python3.8/site-packages/pyro/distributions/torch_distribution.py in __call__(self, sample_shape)
38 :rtype: torch.Tensor
39 """
---> 40 return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)
41
42 @property
/usr/lib/python3.8/site-packages/torch/distributions/gamma.py in rsample(self, sample_shape)
59 def rsample(self, sample_shape=torch.Size()):
60 shape = self._extended_shape(sample_shape)
---> 61 value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(shape)
62 value.detach().clamp_(min=torch.finfo(value.dtype).tiny) # do not record in autograd graph
63 return value
/usr/lib/python3.8/site-packages/torch/nn/modules/module.py in __getattr__(self, name)
582 if name in modules:
583 return modules[name]
--> 584 raise AttributeError("'{}' object has no attribute '{}'".format(
585 type(self).__name__, name))
586
AttributeError: 'GammaPrior' object has no attribute 'concentration'
|
AttributeError
|
def expand(self, batch_shape):
batch_shape = torch.Size(batch_shape)
return LogNormalPrior(self.loc.expand(batch_shape), self.scale.expand(batch_shape))
|
def expand(self, batch_shape):
return LogNormal.expand(self, batch_shape, _instance=self)
|
https://github.com/cornellius-gp/gpytorch/issues/976
|
Warmup: 0%| | 0/300 [00:00, ?it/s]
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-22-1dcdc695642b> in <module>
9 nuts_kernel = pyro.infer.mcmc.NUTS(pyro_model, adapt_step_size=True)
10 mcmc_run = pyro.infer.mcmc.MCMC(nuts_kernel, num_samples=100, warmup_steps=200)
---> 11 mcmc_run.run(train_x, train_y)
~/.local/lib/python3.8/site-packages/pyro/poutine/messenger.py in _context_wrap(context, fn, *args, **kwargs)
6 def _context_wrap(context, fn, *args, **kwargs):
7 with context:
----> 8 return fn(*args, **kwargs)
9
10
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in run(self, *args, **kwargs)
352 z_flat_acc = [[] for _ in range(self.num_chains)]
353 with pyro.validation_enabled(not self.disable_validation):
--> 354 for x, chain_id in self.sampler.run(*args, **kwargs):
355 if num_samples[chain_id] == 0:
356 num_samples[chain_id] += 1
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in run(self, *args, **kwargs)
161 logger = initialize_logger(logger, "", progress_bar)
162 hook_w_logging = _add_logging_hook(logger, progress_bar, self.hook)
--> 163 for sample in _gen_samples(self.kernel, self.warmup_steps, self.num_samples, hook_w_logging,
164 i if self.num_chains > 1 else None,
165 *args, **kwargs):
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in _gen_samples(kernel, warmup_steps, num_samples, hook, chain_id, *args, **kwargs)
105
106 def _gen_samples(kernel, warmup_steps, num_samples, hook, chain_id, *args, **kwargs):
--> 107 kernel.setup(warmup_steps, *args, **kwargs)
108 params = kernel.initial_params
109 # yield structure (key, value.shape) of params
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/hmc.py in setup(self, warmup_steps, *args, **kwargs)
258 self._warmup_steps = warmup_steps
259 if self.model is not None:
--> 260 self._initialize_model_properties(args, kwargs)
261 potential_energy = self.potential_fn(self.initial_params)
262 self._cache(self.initial_params, potential_energy, None)
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/hmc.py in _initialize_model_properties(self, model_args, model_kwargs)
223
224 def _initialize_model_properties(self, model_args, model_kwargs):
--> 225 init_params, potential_fn, transforms, trace = initialize_model(
226 self.model,
227 model_args,
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/util.py in initialize_model(model, model_args, model_kwargs, transforms, max_plate_nesting, jit_compile, jit_options, skip_jit_warnings, num_chains)
363 automatic_transform_enabled = False
364 if max_plate_nesting is None:
--> 365 max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)
366 # Wrap model in `poutine.enum` to enumerate over discrete latent sites.
367 # No-op if model does not have any discrete latents.
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/util.py in _guess_max_plate_nesting(model, args, kwargs)
231 """
232 with poutine.block():
--> 233 model_trace = poutine.trace(model).get_trace(*args, **kwargs)
234 sites = [site for site in model_trace.nodes.values()
235 if site["type"] == "sample"]
~/.local/lib/python3.8/site-packages/pyro/poutine/trace_messenger.py in get_trace(self, *args, **kwargs)
161 Calls this poutine and returns its trace instead of the function's return value.
162 """
--> 163 self(*args, **kwargs)
164 return self.msngr.get_trace()
~/.local/lib/python3.8/site-packages/pyro/poutine/trace_messenger.py in __call__(self, *args, **kwargs)
141 args=args, kwargs=kwargs)
142 try:
--> 143 ret = self.fn(*args, **kwargs)
144 except (ValueError, RuntimeError):
145 exc_type, exc_value, traceback = sys.exc_info()
<ipython-input-22-1dcdc695642b> in pyro_model(x, y)
2
3 def pyro_model(x, y):
----> 4 model.pyro_sample_from_prior()
5 output = model(x)
6 loss = mll.pyro_factor(output, y)
~/.local/lib/python3.8/site-packages/gpytorch/module.py in pyro_sample_from_prior(self)
287 parameters of the model that have GPyTorch priors registered to them.
288 """
--> 289 return _pyro_sample_from_prior(module=self, memo=None, prefix="")
290
291 def local_load_samples(self, samples_dict, memo, prefix):
~/.local/lib/python3.8/site-packages/gpytorch/module.py in _pyro_sample_from_prior(module, memo, prefix)
396 for mname, module_ in module.named_children():
397 submodule_prefix = prefix + ("." if prefix else "") + mname
--> 398 _pyro_sample_from_prior(module=module_, memo=memo, prefix=submodule_prefix)
399
400
~/.local/lib/python3.8/site-packages/gpytorch/module.py in _pyro_sample_from_prior(module, memo, prefix)
391 memo.add(prior)
392 prior = prior.expand(closure().shape)
--> 393 value = pyro.sample(prefix + ("." if prefix else "") + prior_name, prior)
394 setting_closure(value)
395
~/.local/lib/python3.8/site-packages/pyro/primitives.py in sample(name, fn, *args, **kwargs)
108 msg["is_observed"] = True
109 # apply the stack and return its return value
--> 110 apply_stack(msg)
111 return msg["value"]
112
~/.local/lib/python3.8/site-packages/pyro/poutine/runtime.py in apply_stack(initial_msg)
193 break
194
--> 195 default_process_message(msg)
196
197 for frame in stack[-pointer:]:
~/.local/lib/python3.8/site-packages/pyro/poutine/runtime.py in default_process_message(msg)
154 return msg
155
--> 156 msg["value"] = msg["fn"](*msg["args"], **msg["kwargs"])
157
158 # after fn has been called, update msg to prevent it from being called again.
~/.local/lib/python3.8/site-packages/pyro/distributions/torch_distribution.py in __call__(self, sample_shape)
38 :rtype: torch.Tensor
39 """
---> 40 return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)
41
42 @property
/usr/lib/python3.8/site-packages/torch/distributions/gamma.py in rsample(self, sample_shape)
59 def rsample(self, sample_shape=torch.Size()):
60 shape = self._extended_shape(sample_shape)
---> 61 value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(shape)
62 value.detach().clamp_(min=torch.finfo(value.dtype).tiny) # do not record in autograd graph
63 return value
/usr/lib/python3.8/site-packages/torch/nn/modules/module.py in __getattr__(self, name)
582 if name in modules:
583 return modules[name]
--> 584 raise AttributeError("'{}' object has no attribute '{}'".format(
585 type(self).__name__, name))
586
AttributeError: 'GammaPrior' object has no attribute 'concentration'
|
AttributeError
|
def expand(self, batch_shape):
batch_shape = torch.Size(batch_shape)
return UniformPrior(self.low.expand(batch_shape), self.high.expand(batch_shape))
|
def expand(self, batch_shape):
return Uniform.expand(self, batch_shape, _instance=self)
|
https://github.com/cornellius-gp/gpytorch/issues/976
|
Warmup: 0%| | 0/300 [00:00, ?it/s]
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-22-1dcdc695642b> in <module>
9 nuts_kernel = pyro.infer.mcmc.NUTS(pyro_model, adapt_step_size=True)
10 mcmc_run = pyro.infer.mcmc.MCMC(nuts_kernel, num_samples=100, warmup_steps=200)
---> 11 mcmc_run.run(train_x, train_y)
~/.local/lib/python3.8/site-packages/pyro/poutine/messenger.py in _context_wrap(context, fn, *args, **kwargs)
6 def _context_wrap(context, fn, *args, **kwargs):
7 with context:
----> 8 return fn(*args, **kwargs)
9
10
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in run(self, *args, **kwargs)
352 z_flat_acc = [[] for _ in range(self.num_chains)]
353 with pyro.validation_enabled(not self.disable_validation):
--> 354 for x, chain_id in self.sampler.run(*args, **kwargs):
355 if num_samples[chain_id] == 0:
356 num_samples[chain_id] += 1
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in run(self, *args, **kwargs)
161 logger = initialize_logger(logger, "", progress_bar)
162 hook_w_logging = _add_logging_hook(logger, progress_bar, self.hook)
--> 163 for sample in _gen_samples(self.kernel, self.warmup_steps, self.num_samples, hook_w_logging,
164 i if self.num_chains > 1 else None,
165 *args, **kwargs):
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in _gen_samples(kernel, warmup_steps, num_samples, hook, chain_id, *args, **kwargs)
105
106 def _gen_samples(kernel, warmup_steps, num_samples, hook, chain_id, *args, **kwargs):
--> 107 kernel.setup(warmup_steps, *args, **kwargs)
108 params = kernel.initial_params
109 # yield structure (key, value.shape) of params
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/hmc.py in setup(self, warmup_steps, *args, **kwargs)
258 self._warmup_steps = warmup_steps
259 if self.model is not None:
--> 260 self._initialize_model_properties(args, kwargs)
261 potential_energy = self.potential_fn(self.initial_params)
262 self._cache(self.initial_params, potential_energy, None)
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/hmc.py in _initialize_model_properties(self, model_args, model_kwargs)
223
224 def _initialize_model_properties(self, model_args, model_kwargs):
--> 225 init_params, potential_fn, transforms, trace = initialize_model(
226 self.model,
227 model_args,
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/util.py in initialize_model(model, model_args, model_kwargs, transforms, max_plate_nesting, jit_compile, jit_options, skip_jit_warnings, num_chains)
363 automatic_transform_enabled = False
364 if max_plate_nesting is None:
--> 365 max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)
366 # Wrap model in `poutine.enum` to enumerate over discrete latent sites.
367 # No-op if model does not have any discrete latents.
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/util.py in _guess_max_plate_nesting(model, args, kwargs)
231 """
232 with poutine.block():
--> 233 model_trace = poutine.trace(model).get_trace(*args, **kwargs)
234 sites = [site for site in model_trace.nodes.values()
235 if site["type"] == "sample"]
~/.local/lib/python3.8/site-packages/pyro/poutine/trace_messenger.py in get_trace(self, *args, **kwargs)
161 Calls this poutine and returns its trace instead of the function's return value.
162 """
--> 163 self(*args, **kwargs)
164 return self.msngr.get_trace()
~/.local/lib/python3.8/site-packages/pyro/poutine/trace_messenger.py in __call__(self, *args, **kwargs)
141 args=args, kwargs=kwargs)
142 try:
--> 143 ret = self.fn(*args, **kwargs)
144 except (ValueError, RuntimeError):
145 exc_type, exc_value, traceback = sys.exc_info()
<ipython-input-22-1dcdc695642b> in pyro_model(x, y)
2
3 def pyro_model(x, y):
----> 4 model.pyro_sample_from_prior()
5 output = model(x)
6 loss = mll.pyro_factor(output, y)
~/.local/lib/python3.8/site-packages/gpytorch/module.py in pyro_sample_from_prior(self)
287 parameters of the model that have GPyTorch priors registered to them.
288 """
--> 289 return _pyro_sample_from_prior(module=self, memo=None, prefix="")
290
291 def local_load_samples(self, samples_dict, memo, prefix):
~/.local/lib/python3.8/site-packages/gpytorch/module.py in _pyro_sample_from_prior(module, memo, prefix)
396 for mname, module_ in module.named_children():
397 submodule_prefix = prefix + ("." if prefix else "") + mname
--> 398 _pyro_sample_from_prior(module=module_, memo=memo, prefix=submodule_prefix)
399
400
~/.local/lib/python3.8/site-packages/gpytorch/module.py in _pyro_sample_from_prior(module, memo, prefix)
391 memo.add(prior)
392 prior = prior.expand(closure().shape)
--> 393 value = pyro.sample(prefix + ("." if prefix else "") + prior_name, prior)
394 setting_closure(value)
395
~/.local/lib/python3.8/site-packages/pyro/primitives.py in sample(name, fn, *args, **kwargs)
108 msg["is_observed"] = True
109 # apply the stack and return its return value
--> 110 apply_stack(msg)
111 return msg["value"]
112
~/.local/lib/python3.8/site-packages/pyro/poutine/runtime.py in apply_stack(initial_msg)
193 break
194
--> 195 default_process_message(msg)
196
197 for frame in stack[-pointer:]:
~/.local/lib/python3.8/site-packages/pyro/poutine/runtime.py in default_process_message(msg)
154 return msg
155
--> 156 msg["value"] = msg["fn"](*msg["args"], **msg["kwargs"])
157
158 # after fn has been called, update msg to prevent it from being called again.
~/.local/lib/python3.8/site-packages/pyro/distributions/torch_distribution.py in __call__(self, sample_shape)
38 :rtype: torch.Tensor
39 """
---> 40 return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)
41
42 @property
/usr/lib/python3.8/site-packages/torch/distributions/gamma.py in rsample(self, sample_shape)
59 def rsample(self, sample_shape=torch.Size()):
60 shape = self._extended_shape(sample_shape)
---> 61 value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(shape)
62 value.detach().clamp_(min=torch.finfo(value.dtype).tiny) # do not record in autograd graph
63 return value
/usr/lib/python3.8/site-packages/torch/nn/modules/module.py in __getattr__(self, name)
582 if name in modules:
583 return modules[name]
--> 584 raise AttributeError("'{}' object has no attribute '{}'".format(
585 type(self).__name__, name))
586
AttributeError: 'GammaPrior' object has no attribute 'concentration'
|
AttributeError
|
def expand(self, batch_shape):
batch_shape = torch.Size(batch_shape)
return GammaPrior(
self.concentration.expand(batch_shape), self.rate.expand(batch_shape)
)
|
def expand(self, batch_shape):
return Gamma.expand(self, batch_shape, _instance=self)
|
https://github.com/cornellius-gp/gpytorch/issues/976
|
Warmup: 0%| | 0/300 [00:00, ?it/s]
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-22-1dcdc695642b> in <module>
9 nuts_kernel = pyro.infer.mcmc.NUTS(pyro_model, adapt_step_size=True)
10 mcmc_run = pyro.infer.mcmc.MCMC(nuts_kernel, num_samples=100, warmup_steps=200)
---> 11 mcmc_run.run(train_x, train_y)
~/.local/lib/python3.8/site-packages/pyro/poutine/messenger.py in _context_wrap(context, fn, *args, **kwargs)
6 def _context_wrap(context, fn, *args, **kwargs):
7 with context:
----> 8 return fn(*args, **kwargs)
9
10
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in run(self, *args, **kwargs)
352 z_flat_acc = [[] for _ in range(self.num_chains)]
353 with pyro.validation_enabled(not self.disable_validation):
--> 354 for x, chain_id in self.sampler.run(*args, **kwargs):
355 if num_samples[chain_id] == 0:
356 num_samples[chain_id] += 1
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in run(self, *args, **kwargs)
161 logger = initialize_logger(logger, "", progress_bar)
162 hook_w_logging = _add_logging_hook(logger, progress_bar, self.hook)
--> 163 for sample in _gen_samples(self.kernel, self.warmup_steps, self.num_samples, hook_w_logging,
164 i if self.num_chains > 1 else None,
165 *args, **kwargs):
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/api.py in _gen_samples(kernel, warmup_steps, num_samples, hook, chain_id, *args, **kwargs)
105
106 def _gen_samples(kernel, warmup_steps, num_samples, hook, chain_id, *args, **kwargs):
--> 107 kernel.setup(warmup_steps, *args, **kwargs)
108 params = kernel.initial_params
109 # yield structure (key, value.shape) of params
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/hmc.py in setup(self, warmup_steps, *args, **kwargs)
258 self._warmup_steps = warmup_steps
259 if self.model is not None:
--> 260 self._initialize_model_properties(args, kwargs)
261 potential_energy = self.potential_fn(self.initial_params)
262 self._cache(self.initial_params, potential_energy, None)
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/hmc.py in _initialize_model_properties(self, model_args, model_kwargs)
223
224 def _initialize_model_properties(self, model_args, model_kwargs):
--> 225 init_params, potential_fn, transforms, trace = initialize_model(
226 self.model,
227 model_args,
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/util.py in initialize_model(model, model_args, model_kwargs, transforms, max_plate_nesting, jit_compile, jit_options, skip_jit_warnings, num_chains)
363 automatic_transform_enabled = False
364 if max_plate_nesting is None:
--> 365 max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)
366 # Wrap model in `poutine.enum` to enumerate over discrete latent sites.
367 # No-op if model does not have any discrete latents.
~/.local/lib/python3.8/site-packages/pyro/infer/mcmc/util.py in _guess_max_plate_nesting(model, args, kwargs)
231 """
232 with poutine.block():
--> 233 model_trace = poutine.trace(model).get_trace(*args, **kwargs)
234 sites = [site for site in model_trace.nodes.values()
235 if site["type"] == "sample"]
~/.local/lib/python3.8/site-packages/pyro/poutine/trace_messenger.py in get_trace(self, *args, **kwargs)
161 Calls this poutine and returns its trace instead of the function's return value.
162 """
--> 163 self(*args, **kwargs)
164 return self.msngr.get_trace()
~/.local/lib/python3.8/site-packages/pyro/poutine/trace_messenger.py in __call__(self, *args, **kwargs)
141 args=args, kwargs=kwargs)
142 try:
--> 143 ret = self.fn(*args, **kwargs)
144 except (ValueError, RuntimeError):
145 exc_type, exc_value, traceback = sys.exc_info()
<ipython-input-22-1dcdc695642b> in pyro_model(x, y)
2
3 def pyro_model(x, y):
----> 4 model.pyro_sample_from_prior()
5 output = model(x)
6 loss = mll.pyro_factor(output, y)
~/.local/lib/python3.8/site-packages/gpytorch/module.py in pyro_sample_from_prior(self)
287 parameters of the model that have GPyTorch priors registered to them.
288 """
--> 289 return _pyro_sample_from_prior(module=self, memo=None, prefix="")
290
291 def local_load_samples(self, samples_dict, memo, prefix):
~/.local/lib/python3.8/site-packages/gpytorch/module.py in _pyro_sample_from_prior(module, memo, prefix)
396 for mname, module_ in module.named_children():
397 submodule_prefix = prefix + ("." if prefix else "") + mname
--> 398 _pyro_sample_from_prior(module=module_, memo=memo, prefix=submodule_prefix)
399
400
~/.local/lib/python3.8/site-packages/gpytorch/module.py in _pyro_sample_from_prior(module, memo, prefix)
391 memo.add(prior)
392 prior = prior.expand(closure().shape)
--> 393 value = pyro.sample(prefix + ("." if prefix else "") + prior_name, prior)
394 setting_closure(value)
395
~/.local/lib/python3.8/site-packages/pyro/primitives.py in sample(name, fn, *args, **kwargs)
108 msg["is_observed"] = True
109 # apply the stack and return its return value
--> 110 apply_stack(msg)
111 return msg["value"]
112
~/.local/lib/python3.8/site-packages/pyro/poutine/runtime.py in apply_stack(initial_msg)
193 break
194
--> 195 default_process_message(msg)
196
197 for frame in stack[-pointer:]:
~/.local/lib/python3.8/site-packages/pyro/poutine/runtime.py in default_process_message(msg)
154 return msg
155
--> 156 msg["value"] = msg["fn"](*msg["args"], **msg["kwargs"])
157
158 # after fn has been called, update msg to prevent it from being called again.
~/.local/lib/python3.8/site-packages/pyro/distributions/torch_distribution.py in __call__(self, sample_shape)
38 :rtype: torch.Tensor
39 """
---> 40 return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)
41
42 @property
/usr/lib/python3.8/site-packages/torch/distributions/gamma.py in rsample(self, sample_shape)
59 def rsample(self, sample_shape=torch.Size()):
60 shape = self._extended_shape(sample_shape)
---> 61 value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(shape)
62 value.detach().clamp_(min=torch.finfo(value.dtype).tiny) # do not record in autograd graph
63 return value
/usr/lib/python3.8/site-packages/torch/nn/modules/module.py in __getattr__(self, name)
582 if name in modules:
583 return modules[name]
--> 584 raise AttributeError("'{}' object has no attribute '{}'".format(
585 type(self).__name__, name))
586
AttributeError: 'GammaPrior' object has no attribute 'concentration'
|
AttributeError
|
def interpolate(
self,
x_grid: List[torch.Tensor],
x_target: torch.Tensor,
interp_points=range(-2, 2),
eps=1e-10,
):
if torch.is_tensor(x_grid):
x_grid = convert_legacy_grid(x_grid)
num_target_points = x_target.size(0)
num_dim = x_target.size(-1)
assert num_dim == len(x_grid)
grid_sizes = [len(x_grid[i]) for i in range(num_dim)]
# Do some boundary checking, # min/max along each dimension
x_target_max = x_target.max(0)[0]
x_target_min = x_target.min(0)[0]
grid_mins = torch.stack([x_grid[i].min() for i in range(num_dim)], dim=0).to(
x_target_min
)
grid_maxs = torch.stack([x_grid[i].max() for i in range(num_dim)], dim=0).to(
x_target_max
)
lt_min_mask = (x_target_min - grid_mins).lt(-1e-7)
gt_max_mask = (x_target_max - grid_maxs).gt(1e-7)
if lt_min_mask.sum().item():
first_out_of_range = lt_min_mask.nonzero().squeeze(1)[0].item()
raise RuntimeError(
(
"Received data that was out of bounds for the specified grid. "
"Grid bounds were ({0:.3f}, {0:.3f}), but min = {0:.3f}, "
"max = {0:.3f}"
).format(
grid_mins[first_out_of_range].item(),
grid_maxs[first_out_of_range].item(),
x_target_min[first_out_of_range].item(),
x_target_max[first_out_of_range].item(),
)
)
if gt_max_mask.sum().item():
first_out_of_range = gt_max_mask.nonzero().squeeze(1)[0].item()
raise RuntimeError(
(
"Received data that was out of bounds for the specified grid. "
"Grid bounds were ({0:.3f}, {0:.3f}), but min = {0:.3f}, "
"max = {0:.3f}"
).format(
grid_mins[first_out_of_range].item(),
grid_maxs[first_out_of_range].item(),
x_target_min[first_out_of_range].item(),
x_target_max[first_out_of_range].item(),
)
)
# Now do interpolation
interp_points = torch.tensor(
interp_points, dtype=x_grid[0].dtype, device=x_grid[0].device
)
interp_points_flip = interp_points.flip(0) # [1, 0, -1, -2]
num_coefficients = len(interp_points)
interp_values = torch.ones(
num_target_points,
num_coefficients**num_dim,
dtype=x_grid[0].dtype,
device=x_grid[0].device,
)
interp_indices = torch.zeros(
num_target_points,
num_coefficients**num_dim,
dtype=torch.long,
device=x_grid[0].device,
)
for i in range(num_dim):
num_grid_points = x_grid[i].size(0)
grid_delta = (x_grid[i][1] - x_grid[i][0]).clamp_min_(eps)
# left-bounding grid point in index space
lower_grid_pt_idxs = torch.floor((x_target[:, i] - x_grid[i][0]) / grid_delta)
# distance from that left-bounding grid point, again in index space
lower_pt_rel_dists = (
x_target[:, i] - x_grid[i][0]
) / grid_delta - lower_grid_pt_idxs
lower_grid_pt_idxs = (
lower_grid_pt_idxs - interp_points.max()
) # ends up being the left-most (relevant) pt
lower_grid_pt_idxs.detach_()
if len(lower_grid_pt_idxs.shape) == 0:
lower_grid_pt_idxs = lower_grid_pt_idxs.unsqueeze(0)
# get the interp. coeff. based on distances to interpolating points
scaled_dist = lower_pt_rel_dists.unsqueeze(-1) + interp_points_flip.unsqueeze(
-2
)
dim_interp_values = self._cubic_interpolation_kernel(scaled_dist)
# Find points who's closest lower grid point is the first grid point
# This corresponds to a boundary condition that we must fix manually.
left_boundary_pts = torch.nonzero(lower_grid_pt_idxs < 0)
num_left = len(left_boundary_pts)
if num_left > 0:
left_boundary_pts.squeeze_(1)
x_grid_first = (
x_grid[i][:num_coefficients]
.unsqueeze(1)
.t()
.expand(num_left, num_coefficients)
)
grid_targets = (
x_target.select(1, i)[left_boundary_pts]
.unsqueeze(1)
.expand(num_left, num_coefficients)
)
dists = torch.abs(x_grid_first - grid_targets)
closest_from_first = torch.min(dists, 1)[1]
for j in range(num_left):
dim_interp_values[left_boundary_pts[j], :] = 0
dim_interp_values[left_boundary_pts[j], closest_from_first[j]] = 1
lower_grid_pt_idxs[left_boundary_pts[j]] = 0
right_boundary_pts = torch.nonzero(
lower_grid_pt_idxs > num_grid_points - num_coefficients
)
num_right = len(right_boundary_pts)
if num_right > 0:
right_boundary_pts.squeeze_(1)
x_grid_last = (
x_grid[i][-num_coefficients:]
.unsqueeze(1)
.t()
.expand(num_right, num_coefficients)
)
grid_targets = x_target.select(1, i)[right_boundary_pts].unsqueeze(1)
grid_targets = grid_targets.expand(num_right, num_coefficients)
dists = torch.abs(x_grid_last - grid_targets)
closest_from_last = torch.min(dists, 1)[1]
for j in range(num_right):
dim_interp_values[right_boundary_pts[j], :] = 0
dim_interp_values[right_boundary_pts[j], closest_from_last[j]] = 1
lower_grid_pt_idxs[right_boundary_pts[j]] = (
num_grid_points - num_coefficients
)
offset = (interp_points - interp_points.min()).long().unsqueeze(-2)
dim_interp_indices = (
lower_grid_pt_idxs.long().unsqueeze(-1) + offset
) # indices of corresponding ind. pts.
n_inner_repeat = num_coefficients**i
n_outer_repeat = num_coefficients ** (num_dim - i - 1)
# index_coeff = num_grid_points ** (num_dim - i - 1) # TODO: double check
index_coeff = reduce(mul, grid_sizes[i + 1 :], 1) # Think this is right...
dim_interp_indices = dim_interp_indices.unsqueeze(-1).repeat(
1, n_inner_repeat, n_outer_repeat
)
dim_interp_values = dim_interp_values.unsqueeze(-1).repeat(
1, n_inner_repeat, n_outer_repeat
)
# compute the lexicographical position of the indices in the d-dimensional grid points
interp_indices = interp_indices.add(
dim_interp_indices.view(num_target_points, -1).mul(index_coeff)
)
interp_values = interp_values.mul(dim_interp_values.view(num_target_points, -1))
return interp_indices, interp_values
|
def interpolate(
self, x_grid: List[torch.Tensor], x_target: torch.Tensor, interp_points=range(-2, 2)
):
if torch.is_tensor(x_grid):
x_grid = convert_legacy_grid(x_grid)
num_target_points = x_target.size(0)
num_dim = x_target.size(-1)
assert num_dim == len(x_grid)
grid_sizes = [len(x_grid[i]) for i in range(num_dim)]
# Do some boundary checking, # min/max along each dimension
x_target_max = x_target.max(0)[0]
x_target_min = x_target.min(0)[0]
grid_mins = torch.stack([x_grid[i].min() for i in range(num_dim)], dim=0).to(
x_target_min
)
grid_maxs = torch.stack([x_grid[i].max() for i in range(num_dim)], dim=0).to(
x_target_max
)
lt_min_mask = (x_target_min - grid_mins).lt(-1e-7)
gt_max_mask = (x_target_max - grid_maxs).gt(1e-7)
if lt_min_mask.sum().item():
first_out_of_range = lt_min_mask.nonzero().squeeze(1)[0].item()
raise RuntimeError(
(
"Received data that was out of bounds for the specified grid. "
"Grid bounds were ({0:.3f}, {0:.3f}), but min = {0:.3f}, "
"max = {0:.3f}"
).format(
grid_mins[first_out_of_range].item(),
grid_maxs[first_out_of_range].item(),
x_target_min[first_out_of_range].item(),
x_target_max[first_out_of_range].item(),
)
)
if gt_max_mask.sum().item():
first_out_of_range = gt_max_mask.nonzero().squeeze(1)[0].item()
raise RuntimeError(
(
"Received data that was out of bounds for the specified grid. "
"Grid bounds were ({0:.3f}, {0:.3f}), but min = {0:.3f}, "
"max = {0:.3f}"
).format(
grid_mins[first_out_of_range].item(),
grid_maxs[first_out_of_range].item(),
x_target_min[first_out_of_range].item(),
x_target_max[first_out_of_range].item(),
)
)
# Now do interpolation
interp_points = torch.tensor(
interp_points, dtype=x_grid[0].dtype, device=x_grid[0].device
)
interp_points_flip = interp_points.flip(0) # [1, 0, -1, -2]
num_coefficients = len(interp_points)
interp_values = torch.ones(
num_target_points,
num_coefficients**num_dim,
dtype=x_grid[0].dtype,
device=x_grid[0].device,
)
interp_indices = torch.zeros(
num_target_points,
num_coefficients**num_dim,
dtype=torch.long,
device=x_grid[0].device,
)
for i in range(num_dim):
num_grid_points = x_grid[i].size(0)
grid_delta = x_grid[i][1] - x_grid[i][0]
# left-bounding grid point in index space
lower_grid_pt_idxs = torch.floor((x_target[:, i] - x_grid[i][0]) / grid_delta)
# distance from that left-bounding grid point, again in index space
lower_pt_rel_dists = (
x_target[:, i] - x_grid[i][0]
) / grid_delta - lower_grid_pt_idxs
lower_grid_pt_idxs = (
lower_grid_pt_idxs - interp_points.max()
) # ends up being the left-most (relevant) pt
lower_grid_pt_idxs.detach_()
if len(lower_grid_pt_idxs.shape) == 0:
lower_grid_pt_idxs = lower_grid_pt_idxs.unsqueeze(0)
# get the interp. coeff. based on distances to interpolating points
scaled_dist = lower_pt_rel_dists.unsqueeze(-1) + interp_points_flip.unsqueeze(
-2
)
dim_interp_values = self._cubic_interpolation_kernel(scaled_dist)
# Find points who's closest lower grid point is the first grid point
# This corresponds to a boundary condition that we must fix manually.
left_boundary_pts = torch.nonzero(lower_grid_pt_idxs < 0)
num_left = len(left_boundary_pts)
if num_left > 0:
left_boundary_pts.squeeze_(1)
x_grid_first = (
x_grid[i][:num_coefficients]
.unsqueeze(1)
.t()
.expand(num_left, num_coefficients)
)
grid_targets = (
x_target.select(1, i)[left_boundary_pts]
.unsqueeze(1)
.expand(num_left, num_coefficients)
)
dists = torch.abs(x_grid_first - grid_targets)
closest_from_first = torch.min(dists, 1)[1]
for j in range(num_left):
dim_interp_values[left_boundary_pts[j], :] = 0
dim_interp_values[left_boundary_pts[j], closest_from_first[j]] = 1
lower_grid_pt_idxs[left_boundary_pts[j]] = 0
right_boundary_pts = torch.nonzero(
lower_grid_pt_idxs > num_grid_points - num_coefficients
)
num_right = len(right_boundary_pts)
if num_right > 0:
right_boundary_pts.squeeze_(1)
x_grid_last = (
x_grid[i][-num_coefficients:]
.unsqueeze(1)
.t()
.expand(num_right, num_coefficients)
)
grid_targets = x_target.select(1, i)[right_boundary_pts].unsqueeze(1)
grid_targets = grid_targets.expand(num_right, num_coefficients)
dists = torch.abs(x_grid_last - grid_targets)
closest_from_last = torch.min(dists, 1)[1]
for j in range(num_right):
dim_interp_values[right_boundary_pts[j], :] = 0
dim_interp_values[right_boundary_pts[j], closest_from_last[j]] = 1
lower_grid_pt_idxs[right_boundary_pts[j]] = (
num_grid_points - num_coefficients
)
offset = (interp_points - interp_points.min()).long().unsqueeze(-2)
dim_interp_indices = (
lower_grid_pt_idxs.long().unsqueeze(-1) + offset
) # indices of corresponding ind. pts.
n_inner_repeat = num_coefficients**i
n_outer_repeat = num_coefficients ** (num_dim - i - 1)
# index_coeff = num_grid_points ** (num_dim - i - 1) # TODO: double check
index_coeff = reduce(mul, grid_sizes[i + 1 :], 1) # Think this is right...
dim_interp_indices = dim_interp_indices.unsqueeze(-1).repeat(
1, n_inner_repeat, n_outer_repeat
)
dim_interp_values = dim_interp_values.unsqueeze(-1).repeat(
1, n_inner_repeat, n_outer_repeat
)
# compute the lexicographical position of the indices in the d-dimensional grid points
interp_indices = interp_indices.add(
dim_interp_indices.view(num_target_points, -1).mul(index_coeff)
)
interp_values = interp_values.mul(dim_interp_values.view(num_target_points, -1))
return interp_indices, interp_values
|
https://github.com/cornellius-gp/gpytorch/issues/955
|
Traceback (most recent call last):
File "gp.py", line 24, in <module>
exact.train()
File "/st2/jeff/real_estate/models/gaussian_processes/exact.py", line 102, in train
loss = -mll(output, train_y).sum()
File "/st2/jeff/anaconda3/envs/jeff/lib/python3.7/site-packages/gpytorch/module.py", line 22, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/st2/jeff/anaconda3/envs/jeff/lib/python3.7/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py", line 27, in forward
res = output.log_prob(target)
File "/st2/jeff/anaconda3/envs/jeff/lib/python3.7/site-packages/gpytorch/distributions/multivariate_normal.py", line 128, in log_prob
inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
File "/st2/jeff/anaconda3/envs/jeff/lib/python3.7/site-packages/gpytorch/lazy/batch_repeat_lazy_tensor.py", line 242, in inv_quad_logdet
inv_quad_rhs, logdet, reduce_inv_quad=False
File "/st2/jeff/anaconda3/envs/jeff/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py", line 1052, in inv_quad_logdet
*args,
File "/st2/jeff/anaconda3/envs/jeff/lib/python3.7/site-packages/gpytorch/functions/_inv_quad_log_det.py", line 63, in forward
preconditioner, precond_lt, logdet_correction = lazy_tsr._preconditioner()
File "/st2/jeff/anaconda3/envs/jeff/lib/python3.7/site-packages/gpytorch/lazy/added_diag_lazy_tensor.py", line 59, in _preconditioner
self._piv_chol_self = pivoted_cholesky.pivoted_cholesky(self._lazy_tensor, max_iter)
File "/st2/jeff/anaconda3/envs/jeff/lib/python3.7/site-packages/gpytorch/utils/pivoted_cholesky.py", line 19, in pivoted_cholesky
matrix_diag = matrix._approx_diag()
File "/st2/jeff/anaconda3/envs/jeff/lib/python3.7/site-packages/gpytorch/lazy/interpolated_lazy_tensor.py", line 90, in _approx_diag
left_res = left_interp(self.left_interp_indices, self.left_interp_values, base_diag_root.unsqueeze(-1))
File "/st2/jeff/anaconda3/envs/jeff/lib/python3.7/site-packages/gpytorch/utils/interpolation.py", line 187, in left_interp
res = rhs_expanded.gather(-3, interp_indices_expanded).mul(interp_values_expanded)
RuntimeError: Invalid index in gather at /tmp/pip-req-build-58y_cjjl/aten/src/TH/generic/THTensorEvenMoreMath.cpp:472
loss: 57158.71 med: 0.30, minmax: 0.30 0.30 noise: 0.56: : 0it [00:08, ?it/s]
|
RuntimeError
|
def __init__(self, mean, covariance_matrix, validate_args=False, interleaved=True):
if not torch.is_tensor(mean) and not isinstance(mean, LazyTensor):
raise RuntimeError(
"The mean of a MultitaskMultivariateNormal must be a Tensor or LazyTensor"
)
if not torch.is_tensor(covariance_matrix) and not isinstance(
covariance_matrix, LazyTensor
):
raise RuntimeError(
"The covariance of a MultitaskMultivariateNormal must be a Tensor or LazyTensor"
)
if mean.dim() < 2:
raise RuntimeError("mean should be a matrix or a batch matrix (batch mode)")
self._output_shape = mean.shape
# TODO: Instead of transpose / view operations, use a PermutationLazyTensor (see #539) to handle interleaving
self._interleaved = interleaved
if self._interleaved:
mean_mvn = mean.reshape(*mean.shape[:-2], -1)
else:
mean_mvn = mean.transpose(-1, -2).reshape(*mean.shape[:-2], -1)
super().__init__(
mean=mean_mvn, covariance_matrix=covariance_matrix, validate_args=validate_args
)
|
def __init__(self, mean, covariance_matrix, validate_args=False, interleaved=True):
"""
Constructs a multi-output multivariate Normal random variable, based on mean and covariance
Can be multi-output multivariate, or a batch of multi-output multivariate Normal
Passing a matrix mean corresponds to a multi-output multivariate Normal
Passing a matrix mean corresponds to a batch of multivariate Normals
Params:
mean (:obj:`torch.tensor`): An `n x t` or batch `b x n x t` matrix of means for the MVN distribution.
covar (:obj:`torch.tensor` or :obj:`gpytorch.lazy.LazyTensor`): An `nt x nt` or batch `b x nt x nt`
covariance matrix of MVN distribution.
validate_args (:obj:`bool`): If True, validate `mean` anad `covariance_matrix` arguments.
interleaved (:obj:`bool`): If True, covariance matrix is interpreted as block-diagonal w.r.t.
inter-task covariances for each observation. If False, it is interpreted as block-diagonal
w.r.t. inter-observation covariance for each task.
"""
if not torch.is_tensor(mean) and not isinstance(mean, LazyTensor):
raise RuntimeError(
"The mean of a MultitaskMultivariateNormal must be a Tensor or LazyTensor"
)
if not torch.is_tensor(covariance_matrix) and not isinstance(
covariance_matrix, LazyTensor
):
raise RuntimeError(
"The covariance of a MultitaskMultivariateNormal must be a Tensor or LazyTensor"
)
if mean.dim() < 2:
raise RuntimeError("mean should be a matrix or a batch matrix (batch mode)")
self._output_shape = mean.shape
# TODO: Instead of transpose / view operations, use a PermutationLazyTensor (see #539) to handle interleaving
self._interleaved = interleaved
if self._interleaved:
mean_mvn = mean.reshape(*mean.shape[:-2], -1)
else:
mean_mvn = mean.transpose(-1, -2).reshape(*mean.shape[:-2], -1)
super().__init__(
mean=mean_mvn, covariance_matrix=covariance_matrix, validate_args=validate_args
)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def from_independent_mvns(cls, mvns):
"""
Convert an iterable of MVNs into a :obj:`~gpytorch.distributions.MultitaskMultivariateNormal`.
The resulting distribution will have :attr:`len(mvns)` tasks, and the tasks will be independent.
:param ~gpytorch.distributions.MultitaskNormal mvn: The base MVN distributions.
:returns: the independent multitask distribution
:rtype: gpytorch.distributions.MultitaskMultivariateNormal
Example:
>>> # model is a gpytorch.models.VariationalGP
>>> # likelihood is a gpytorch.likelihoods.Likelihood
>>> mean = torch.randn(4, 3)
>>> covar_factor = torch.randn(4, 3, 3)
>>> covar = covar_factor @ covar_factor.transpose(-1, -2)
>>> mvn1 = gpytorch.distributions.MultivariateNormal(mean, covar)
>>>
>>> mean = torch.randn(4, 3)
>>> covar_factor = torch.randn(4, 3, 3)
>>> covar = covar_factor @ covar_factor.transpose(-1, -2)
>>> mvn2 = gpytorch.distributions.MultivariateNormal(mean, covar)
>>>
>>> mmvn = MultitaskMultivariateNormal.from_independent_mvns([mvn1, mvn2])
>>> print(mmvn.event_shape, mmvn.batch_shape)
>>> # torch.Size([3, 2]), torch.Size([4])
"""
if len(mvns) < 2:
raise ValueError(
"Must provide at least 2 MVNs to form a MultitaskMultivariateNormal"
)
if any(isinstance(mvn, MultitaskMultivariateNormal) for mvn in mvns):
raise ValueError("Cannot accept MultitaskMultivariateNormals")
if not all(m.batch_shape == mvns[0].batch_shape for m in mvns[1:]):
raise ValueError("All MultivariateNormals must have the same batch shape")
if not all(m.event_shape == mvns[0].event_shape for m in mvns[1:]):
raise ValueError("All MultivariateNormals must have the same event shape")
mean = torch.stack([mvn.mean for mvn in mvns], -1)
# TODO: To do the following efficiently, we don't want to evaluate the
# covariance matrices. Instead, we want to use the lazies directly in the
# BlockDiagLazyTensor. This will require implementing a new BatchLazyTensor:
# https://github.com/cornellius-gp/gpytorch/issues/468
covar_blocks_lazy = CatLazyTensor(
*[mvn.lazy_covariance_matrix.unsqueeze(0) for mvn in mvns],
dim=0,
output_device=mean.device,
)
covar_lazy = BlockDiagLazyTensor(covar_blocks_lazy, block_dim=0)
return cls(mean=mean, covariance_matrix=covar_lazy, interleaved=False)
|
def from_independent_mvns(cls, mvns):
if len(mvns) < 2:
raise ValueError(
"Must provide at least 2 MVNs to form a MultitaskMultivariateNormal"
)
if any(isinstance(mvn, MultitaskMultivariateNormal) for mvn in mvns):
raise ValueError("Cannot accept MultitaskMultivariateNormals")
if not all(m.batch_shape == mvns[0].batch_shape for m in mvns[1:]):
raise ValueError("All MultivariateNormals must have the same batch shape")
if not all(m.event_shape == mvns[0].event_shape for m in mvns[1:]):
raise ValueError("All MultivariateNormals must have the same event shape")
mean = torch.stack([mvn.mean for mvn in mvns], -1)
# TODO: To do the following efficiently, we don't want to evaluate the
# covariance matrices. Instead, we want to use the lazies directly in the
# BlockDiagLazyTensor. This will require implementing a new BatchLazyTensor:
# https://github.com/cornellius-gp/gpytorch/issues/468
covar_blocks_lazy = CatLazyTensor(
*[mvn.lazy_covariance_matrix.unsqueeze(0) for mvn in mvns],
dim=0,
output_device=mean.device,
)
covar_lazy = BlockDiagLazyTensor(covar_blocks_lazy, block_dim=0)
return cls(mean=mean, covariance_matrix=covar_lazy, interleaved=False)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def get_base_samples(self, sample_shape=torch.Size()):
base_samples = super().get_base_samples(sample_shape)
if not self._interleaved:
# flip shape of last two dimensions
new_shape = sample_shape + self._output_shape[:-2] + self._output_shape[:-3:-1]
return base_samples.view(new_shape).transpose(-1, -2).contiguous()
return base_samples.view(*sample_shape, *self._output_shape)
|
def get_base_samples(self, sample_shape=torch.Size()):
"""Get i.i.d. standard Normal samples (to be used with rsample(base_samples=base_samples))"""
base_samples = super().get_base_samples(sample_shape)
if not self._interleaved:
# flip shape of last two dimensions
new_shape = sample_shape + self._output_shape[:-2] + self._output_shape[:-3:-1]
return base_samples.view(new_shape).transpose(-1, -2).contiguous()
return base_samples.view(*sample_shape, *self._output_shape)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __init__(self, mean, covariance_matrix, validate_args=False):
self._islazy = isinstance(mean, LazyTensor) or isinstance(
covariance_matrix, LazyTensor
)
if self._islazy:
if validate_args:
# TODO: add argument validation
raise NotImplementedError()
self.loc = mean
self._covar = covariance_matrix
self.__unbroadcasted_scale_tril = None
self._validate_args = validate_args
batch_shape = _mul_broadcast_shape(
self.loc.shape[:-1], covariance_matrix.shape[:-2]
)
event_shape = self.loc.shape[-1:]
# TODO: Integrate argument validation for LazyTensors into torch.distribution validation logic
super(TMultivariateNormal, self).__init__(
batch_shape, event_shape, validate_args=False
)
else:
super().__init__(
loc=mean, covariance_matrix=covariance_matrix, validate_args=validate_args
)
|
def __init__(self, mean, covariance_matrix, validate_args=False):
self._islazy = isinstance(mean, LazyTensor) or isinstance(
covariance_matrix, LazyTensor
)
if self._islazy:
if validate_args:
# TODO: add argument validation
raise NotImplementedError()
self.loc = mean
self._covar = covariance_matrix
self.__unbroadcasted_scale_tril = None
self._validate_args = validate_args
batch_shape, event_shape = self.loc.shape[:-1], self.loc.shape[-1:]
# TODO: Integrate argument validation for LazyTensors into torch.distribution validation logic
super(TMultivariateNormal, self).__init__(
batch_shape, event_shape, validate_args=False
)
else:
super().__init__(
loc=mean, covariance_matrix=covariance_matrix, validate_args=validate_args
)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def confidence_region(self):
"""
Returns 2 standard deviations above and below the mean.
:rtype: (torch.Tensor, torch.Tensor)
:return: pair of tensors of size (b x d) or (d), where
b is the batch size and d is the dimensionality of the random
variable. The first (second) Tensor is the lower (upper) end of
the confidence region.
"""
std2 = self.stddev.mul_(2)
mean = self.mean
return mean.sub(std2), mean.add(std2)
|
def confidence_region(self):
"""
Returns 2 standard deviations above and below the mean.
Returns:
Tuple[Tensor, Tensor]: pair of tensors of size (b x d) or (d), where
b is the batch size and d is the dimensionality of the random
variable. The first (second) Tensor is the lower (upper) end of
the confidence region.
"""
std2 = self.stddev.mul_(2)
mean = self.mean
return mean.sub(std2), mean.add(std2)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def get_base_samples(self, sample_shape=torch.Size()):
"""Get i.i.d. standard Normal samples (to be used with rsample(base_samples=base_samples))"""
return super().get_base_samples(sample_shape=sample_shape)
|
def get_base_samples(self, sample_shape=torch.Size()):
"""Get i.i.d. standard Normal samples (to be used with rsample(base_samples=base_samples))"""
with torch.no_grad():
shape = self._extended_shape(sample_shape)
base_samples = _standard_normal(
shape, dtype=self.loc.dtype, device=self.loc.device
)
return base_samples
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.